import os
from io import BytesIO

from langchain_openai import ChatOpenAI
from openai import OpenAI


def n_of_questions():
    """Return the fixed number of questions."""
    return 25

# Read the OpenAI API key from the environment rather than hard-coding it
openai_api_key = os.environ.get("OPENAI_API_KEY")

model = "gpt-4o-mini"

def load_model(openai_api_key):
    """Create a LangChain ChatOpenAI client for the configured model."""
    return ChatOpenAI(
        model_name=model,
        openai_api_key=openai_api_key,
        temperature=0.5
    )
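
# Usage sketch (an assumption, not taken from this file): the ChatOpenAI
# instance returned by load_model() is a LangChain Runnable, so it could be
# called through the standard interface, e.g.:
#   llm = load_model(openai_api_key)
#   reply = llm.invoke("Summarize the recorded answers in one sentence.")
#   print(reply.content)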

# Initialize the OpenAI client with the API key
client = OpenAI(api_key=openai_api_key)


def convert_text_to_speech(text, output, voice):
    def _write_audio(response):
        # Write the streamed audio either into a BytesIO buffer or to a file path
        if isinstance(output, BytesIO):
            for chunk in response.iter_bytes():
                output.write(chunk)
        else:
            with open(output, 'wb') as f:
                for chunk in response.iter_bytes():
                    f.write(chunk)

    try:
        # Convert the final text to speech
        response = client.audio.speech.create(model="tts-1-hd", voice=voice, input=text)
        _write_audio(response)
    except Exception as e:
        print(f"An error occurred: {e}")
        # Fallback in case of error: speak a short fixed message instead
        response = client.audio.speech.create(model="tts-1-hd", voice=voice, input='Here is my Report.')
        _write_audio(response)


def transcribe_audio(audio):
    # Open with a context manager so the file handle is closed after the request
    with open(audio, "rb") as audio_file:
        transcription = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file
        )
    return transcription.text
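

# A minimal end-to-end sketch (not part of the original module). The input file
# "recording.mp3", the output path "report.mp3", and the "alloy" voice are
# illustrative assumptions rather than values used by the application.
if __name__ == "__main__":
    text = transcribe_audio("recording.mp3")
    print(f"Transcript: {text}")
    convert_text_to_speech("Here is my Report.", "report.mp3", voice="alloy")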