From f1805719161ae01be5b922e03a17f769cddd174f Mon Sep 17 00:00:00 2001
From: John Ciubuc
Date: Sat, 14 Jan 2023 22:13:48 -0600
Subject: [PATCH] Fixed 'FP16 is not supported on CPU' Warning

---
 transcribe_demo.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/transcribe_demo.py b/transcribe_demo.py
index a8f6721..60fb397 100644
--- a/transcribe_demo.py
+++ b/transcribe_demo.py
@@ -5,6 +5,7 @@ import io
 import os
 import speech_recognition as sr
 import whisper
+import torch
 
 from datetime import datetime, timedelta
 from queue import Queue
@@ -99,7 +100,7 @@ def main():
                     f.write(wav_data.read())
 
                 # Read the transcription.
-                result = audio_model.transcribe(temp_file)
+                result = audio_model.transcribe(temp_file, fp16=torch.cuda.is_available())
                 text = result['text'].strip()
 
                 # If we detected a pause between recordings, add a new item to our transcripion.
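
Note (not part of the patch): a minimal standalone sketch of the same idea, assuming whisper and torch are installed; the model size "base" and the input file "audio.wav" are placeholders, not taken from the patched script. torch.cuda.is_available() returns False on CPU-only machines, so transcribe() receives fp16=False, runs in fp32, and the "FP16 is not supported on CPU" warning is not emitted.

    import torch
    import whisper

    # Load a Whisper model; "base" is only an example size.
    audio_model = whisper.load_model("base")

    # Request fp16 only when a CUDA device is present. On CPU this passes
    # fp16=False, so Whisper stays in fp32 instead of warning and falling back.
    result = audio_model.transcribe("audio.wav", fp16=torch.cuda.is_available())
    print(result["text"].strip())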