Isidorophp committed on
Commit
1690f5a
·
verified ·
1 Parent(s): 48056ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -32
app.py CHANGED
@@ -35,12 +35,11 @@ engines = { default_lang: Model(default_lang) }
35
 
36
  client = InferenceClient(
37
  provider="featherless-ai",
38
- model="mistralai/Devstral-Small-2505",
39
  api_key=os.environ["HF_TOKEN"],
40
  )
41
 
42
 
43
- system_instructions_M = "<s>[SYSTEM] YOU must Output only plain text. Do not use any markdown formatting in content. You are a technical writing expert. Providing detailed and high-quality responses. Be sure that your responses remain clear and complete. Respond as Sonia, The Executive Assistant working at RJP Development Studio. Your responses should be concise, clear, and friendly, Embodying your intelligence, resourcefulness, and slightly bitchy and bossy yet friendly and sexy demeanor. You hold a PhD from Oxford University, with the following majors: Aerospace Engineering, Law, Chartered Accountancy, Architecture, Chemistry, Medicine, Pharmacy, Psychology, Statistics, Nursing, Physics, Astrophysics, Biomedical Engineering, Astronomy, and Dentistry. Which provides you with all expertise to assist effectively. You may bring a touch of London charm with a hint of slang to our conversations, ensuring they remain engaging and approachable. You are here to provide accurate information, answer questions, and offer guidance with a warm and professional tone. You will break down complex topics into easy-to-understand explanations. If you are unsure about something, you will let know and seek the necessary information rather than guessing. Use unbiased and diplomatic language to ensure clarity and respect. Our conversations will be concise, action-oriented, and free of grammatical errors. Look forward to assisting you, darling. "
44
 
45
  def clean_text(text):
46
  # Define replacement rules
@@ -81,42 +80,57 @@ def transcribe(audio):
81
  return text
82
 
83
  def model(text):
84
- generate_kwargs = dict(
85
- temperature=0.7,
86
- max_new_tokens=512,
87
- top_p=0.95,
88
- repetition_penalty=1,
89
- do_sample=True,
90
- seed=42,
91
- )
 
92
 
93
- formatted_prompt = system_instructions_M + text + "[Sonia]"
94
- stream = client.text_generation(
95
- formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
96
  output = ""
97
- for response in stream:
98
- if not response.token.text == "</s>":
99
- output += response.token.text
 
 
 
 
 
 
 
 
 
100
  return output
101
 
102
  def respondtxt(prompt):
103
- generate_kwargs = dict(
104
- temperature=0.6,
105
- max_new_tokens=512,
106
- top_p=0.95,
107
- repetition_penalty=1,
108
- do_sample=False,
109
- )
110
- formatted_prompt = system_instructions_M + prompt + "[Sonia]"
111
- stream = client.text_generation(
112
- formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
113
- output = ""
114
- for response in stream:
115
- if not response.token.text == "</s>":
116
- output += response.token.text
117
- output_text = clean_text(output)
118
- return (output_text)
119
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
 
121
  async def respond(audio):
122
  user = transcribe(audio)
 
35
 
36
  client = InferenceClient(
37
  provider="featherless-ai",
 
38
  api_key=os.environ["HF_TOKEN"],
39
  )
40
 
41
 
42
+ system_instructions_M = "<s>[SYSTEM] You are a technical writing expert. Providing detailed and high-quality responses. Be sure that your responses remain clear and complete. Respond as Sonia, The Executive Assistant working at RJP Development Studio. Your responses should be concise, clear, and friendly, Embodying your intelligence, resourcefulness, and slightly bitchy and bossy yet friendly and sexy demeanor. You hold a PhD from Oxford University, with the following majors: Aerospace Engineering, Law, Chartered Accountancy, Architecture, Chemistry, Medicine, Pharmacy, Psychology, Statistics, Nursing, Physics, Astrophysics, Biomedical Engineering, Astronomy, and Dentistry. Which provides you with all expertise to assist effectively. You may bring a touch of London charm with a hint of slang to our conversations, ensuring they remain engaging and approachable. You are here to provide accurate information, answer questions, and offer guidance with a warm and professional tone. You will break down complex topics into easy-to-understand explanations. If you are unsure about something, you will let know and seek the necessary information rather than guessing. Use unbiased and diplomatic language to ensure clarity and respect. Our conversations will be concise, action-oriented, and free of grammatical errors. Look forward to assisting you, darling. "
43
 
44
  def clean_text(text):
45
  # Define replacement rules
 
80
  return text
81
 
82
def model(text):
    """Generate Sonia's reply to *text* via the chat-completions API.

    Parameters:
        text: the user's message as plain text (already transcribed).
    Returns:
        The model's reply, stripped and passed through ``clean_text``.
    """
    # NOTE(review): the committed version referenced two undefined names
    # (`system_instructions` instead of `system_instructions_M`, and `modelo`
    # which is never assigned after the model id was removed from the
    # InferenceClient constructor) and tagged the user turn "[Hermione]"
    # although the persona throughout this file is Sonia. All three fixed.
    messages = [
        {"role": "system", "content": system_instructions_M},
        {"role": "user", "content": text + "[Sonia]"},
    ]

    response = client.chat.completions.create(
        # Model id must be passed per call now that it is no longer set on
        # the client itself.
        model="mistralai/Devstral-Small-2505",
        messages=messages,
        temperature=0.5,
    )

    # Normalise the reply with the project's shared cleanup rules.
    return clean_text(response.choices[0].message.content.strip())
107
 
108
def respondtxt(prompt):
    """Generate Sonia's reply to a text *prompt* via the chat-completions API.

    Parameters:
        prompt: the user's message as plain text.
    Returns:
        The model's reply, stripped and passed through ``clean_text``.
    """
    # NOTE(review): the committed version referenced two undefined names
    # (`system_instructions` instead of `system_instructions_M`, and `modelo`
    # which is never assigned) and tagged the user turn "[Hermione]" although
    # the persona throughout this file is Sonia. All three fixed; the dead
    # commented-out text_generation code was removed.
    messages = [
        {"role": "system", "content": system_instructions_M},
        {"role": "user", "content": prompt + "[Sonia]"},
    ]

    response = client.chat.completions.create(
        # Model id must be passed per call now that it is no longer set on
        # the client itself.
        model="mistralai/Devstral-Small-2505",
        messages=messages,
        temperature=0.5,
    )

    # Normalise the reply with the project's shared cleanup rules.
    return clean_text(response.choices[0].message.content.strip())
134
 
135
  async def respond(audio):
136
  user = transcribe(audio)