Update app.py
app.py CHANGED
@@ -81,16 +81,6 @@ def transcribe(audio):
     return text
 
 def model(text):
-    #generate_kwargs = dict(temperature=0.7, max_new_tokens=512, top_p=0.95, repetition_penalty=1, do_sample=True, seed=42, )
-    #formatted_prompt = system_instructions_M + text + "[Sonia]"
-    #stream = client.text_generation(
-    #    formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    #output = ""
-    #for response in stream:
-    #    if not response.token.text == "</s>":
-    #        output += response.token.text
-    #return output
-
     output = ""
     formatted_prompt = [ {
         "role": 'system', "content": system_instructions },
@@ -100,24 +90,13 @@ def model(text):
     stream = client.chat.completions.create(
         model=modelo,
         messages=formatted_prompt,
-        temperature=0.
+        temperature=0.6,
     )
 
     output = clean_text(stream.choices[0].message.content.strip())
     return output
 
 def respondtxt(prompt):
-    #generate_kwargs = dict( temperature=0.6, max_new_tokens=512, top_p=0.95, repetition_penalty=1, do_sample=False, )
-    #formatted_prompt = system_instructions_M + prompt + "[Sonia]"
-    #stream = client.text_generation(
-    #    formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
-    #output = ""
-    #for response in stream:
-    #    if not response.token.text == "</s>":
-    #        output += response.token.text
-    #output_text = clean_text(output)
-    #return (output_text)
-
     output = ""
     formatted_prompt = [ {
         "role": 'system', "content": system_instructions },
@@ -127,7 +106,7 @@ def respondtxt(prompt):
     stream = client.chat.completions.create(
         model=modelo,
         messages=formatted_prompt,
-        temperature=0.
+        temperature=0.6,
    )
 
     output = clean_text(stream.choices[0].message.content.strip())
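
For readers skimming the diff: both model() and respondtxt() drop their old commented-out client.text_generation() code and now share the same chat-completions call. Below is a minimal, self-contained sketch of that pattern, assuming an OpenAI-compatible client; modelo, system_instructions, and clean_text are defined elsewhere in app.py, so the values here are illustrative placeholders.

# Minimal sketch of the pattern model()/respondtxt() use after this commit.
# `client`, `modelo`, `system_instructions`, and `clean_text` are defined
# elsewhere in app.py; the placeholders below are assumptions for illustration.
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
modelo = "gpt-4o-mini"  # placeholder model name
system_instructions = "You are Sonia, a helpful assistant."  # placeholder

def clean_text(text: str) -> str:
    # stand-in for the real clean_text() in app.py: collapse whitespace
    return " ".join(text.split())

def respond(prompt: str) -> str:
    formatted_prompt = [
        {"role": "system", "content": system_instructions},
        {"role": "user", "content": prompt},
    ]
    # The variable is named `stream` in app.py, but this is a plain
    # (non-streaming) chat-completions call.
    stream = client.chat.completions.create(
        model=modelo,
        messages=formatted_prompt,
        temperature=0.6,  # the value this commit sets
    )
    return clean_text(stream.choices[0].message.content.strip())

One behavioral note: the removed `temperature=0.` still parses as the float 0.0 in Python, so this commit effectively raises the sampling temperature from 0.0 to 0.6 in both functions rather than fixing a syntax error.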