Thendral28 committed
Commit 5e045bd · verified · 1 Parent(s): 2b5cd74

Create app.py

Files changed (1)
  1. app.py +405 -0
app.py ADDED
@@ -0,0 +1,405 @@
+ from fastapi import FastAPI, File, UploadFile, HTTPException, Form, Request
+ from fastapi.responses import JSONResponse
+ import torch
+ import os
+ from pydub import AudioSegment
+ import tempfile
+ from transformers import pipeline as hf_pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
+ from openai import OpenAI
+ from pydantic import BaseModel
+ import logging
+ import subprocess
+ from dotenv import load_dotenv
+ import re
+ import requests
+ from typing import Optional, Dict
+ import time
+ import numpy as np
+ import io
+
+ app = FastAPI()
+ load_dotenv()
+ UPLOAD_FOLDER = tempfile.gettempdir()
+ os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+ os.environ['TRANSFORMERS_CACHE'] = '/asif/cache/'
+
+ logging.basicConfig(level=logging.INFO)
+
+ fluency_pipe = hf_pipeline("audio-classification", model="megathil/fluency_prediction")
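+ # Note: the audio-classification pipeline returns a list of {"label", "score"} dicts,
+ # one entry per class; the endpoint below keeps only the highest-scoring label.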
+
+ # Define device and torch_dtype
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+ torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
+ # Define the Hugging Face model and processor
+ distil_model_id = "distil-whisper/distil-small.en"
+
+ distil_model = AutoModelForSpeechSeq2Seq.from_pretrained(
+     distil_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
+ )
+ distil_model.to(device)
+
+ processor = AutoProcessor.from_pretrained(distil_model_id)
+
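+ # distil-whisper/distil-small.en is a distilled, English-only Whisper variant; it is
+ # loaded in float16 when a GPU is available and float32 on CPU (see torch_dtype above).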
+ async def hello_world():
+     return {"message": "Hello World"}
+
+ client = OpenAI(
+     api_key=os.environ['AUTH_TOKEN_AI'],
+     base_url="https://api.deepinfra.com/v1/openai",
+ )
+
+
+ class EnhanceTextRequest(BaseModel):
+     text: str
+
+ class AnalyzeTextRequest(BaseModel):
+     text: str
+     question: str
+
+ class AnalyzeTextResponse(BaseModel):
+     relevance_rating: int
+     vocabulary_rating: int
+
+ class User(BaseModel):
+     firstName: str
+     lastName: Optional[str]
+     city: Optional[str] = None
+     college: Optional[str] = None
+     department: Optional[str] = None
+     course: Optional[str] = None
+
+ class ModuleModel(BaseModel):
+     questions: Optional[Dict[str, str]] = None
+     unitId: Optional[str] = None
+     moduleId: Optional[str] = None
+
+ class MyModel(BaseModel):
+     userProgressStatusId: str
+
+ # Define the enhance_text function
+ def enhance_text(text, question):
+     response = client.chat.completions.create(
+         model="meta-llama/Meta-Llama-3-8B-Instruct",
+         messages=[
+             {"role": "system", "content": "You will be provided with a job interview question and answer. Your task is to rewrite the answer in standard English, making it more comprehensive and suitable for a fresher in an interview. Ensure the response is at least 80 words and includes any relevant standard points related to the question. Be concise and omit disclaimers."},
+             {"role": "user", "content": f"question:{question} \n Answer:{text}"}
+         ],
+         temperature=0.7,
+         max_tokens=400,
+         top_p=1
+     )
+     enhanced_text = response.choices[0].message.content
+     return enhanced_text
+
+ def extract_rating_and_reason(text, rating_pattern, reason_pattern):
+     rating_match = re.search(rating_pattern, text, re.IGNORECASE)
+     reason_match = re.search(reason_pattern, text, re.IGNORECASE | re.DOTALL)
+
+     if rating_match and reason_match:
+         rating = int(rating_match.group(1))
+         reason = reason_match.group(1).strip()
+         return {"rating": rating, "reason": reason}
+     else:
+         return {"rating": "", "reason": "Unable to retrieve rating and reason due to insufficient information."}
+
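+ # Illustrative example (not from an actual model run): for a reply such as
+ # "relevance rating: 85, reason: the answer directly addresses the question",
+ # the rating pattern captures 85 and the reason pattern captures the trailing text,
+ # so extract_rating_and_reason returns {"rating": 85, "reason": "the answer directly addresses the question"}.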
+ def vocabulary_text(text, question):
+     response = client.chat.completions.create(
+         model="meta-llama/Meta-Llama-3-8B-Instruct",
+         messages=[
+             {"role": "system", "content": "You will be provided with a question and an answer, and your task is to rate the vocabulary in the answer in the range of 1-100 and give reasoning (Provide the answer in this format: vocabulary_rating : rating, reason: , in 100 words and respond without any introductory or concluding text)"},
+             {"role": "user", "content": f"question:{question} \n Answer:{text}"}
+         ],
+         temperature=0.7,
+         max_tokens=250,
+         top_p=1
+     )
+
+     vocabulary_rating = response.choices[0].message.content
+     print("vocabulary_rating-->", vocabulary_rating)
+     # Define patterns to match rating and reason
+     vocab_rating_pattern = r"(?:vocabulary\s*rating|vocab\.\s*rating|vocab\s*rating|rating)\s*:\s*(\d+)"
+     reason_pattern = r"reason\s*:\s*(.*)"
+
+     # Extract and format the rating and reason
+     result = extract_rating_and_reason(vocabulary_rating, vocab_rating_pattern, reason_pattern)
+     formatted_vocabulary_rating = {"Vocabulary_rating": result["rating"], "reason": result["reason"]}
+
+     return formatted_vocabulary_rating
+
+ def relevance_check(text, question):
+     response = client.chat.completions.create(
+         model="meta-llama/Meta-Llama-3-8B-Instruct",
+         messages=[
+             {"role": "system", "content": "You will be provided with a question and an answer from a job interview, and your task is to rate how relevant the answer is to the question in the range of 1-100 and give reasoning (Provide the answer in this format: relevance_rating : rating, reason: , in 100 words and respond without any introductory or concluding text)"},
+             {"role": "user", "content": f"question:{question} \n Answer:{text}"}
+         ],
+         temperature=0.7,
+         max_tokens=250,
+         top_p=1
+     )
+     relevance_rating = response.choices[0].message.content
+     print("relevance_rating-->", relevance_rating)
+     # Define patterns to match rating and reason
+     rel_rating_pattern = r"(?:relevance\s*rating|rel\.\s*rating|rel\s*rating|rating)\s*:\s*(\d+)"
+     reason_pattern = r"reason\s*:\s*(.*)"
+
+     # Extract and format the rating and reason
+     result = extract_rating_and_reason(relevance_rating, rel_rating_pattern, reason_pattern)
+     formatted_relevance_rating = {"Relevance_rating": result["rating"], "reason": result["reason"]}
+
+     return formatted_relevance_rating
+
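+ # The ASR pipeline below returns a dict with a "text" key; chunk_length_s and
+ # stride_length_s enable chunked long-form transcription, so recordings longer than
+ # 30 seconds are processed in overlapping windows.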
+ def transcribe_audio(file_path: str) -> dict:
+     whisper = hf_pipeline(
+         "automatic-speech-recognition",
+         model=distil_model,
+         tokenizer=processor.tokenizer,
+         feature_extractor=processor.feature_extractor,
+         max_new_tokens=128,
+         torch_dtype=torch_dtype,
+         device=device,
+     )
+     transcription = whisper(file_path, chunk_length_s=30, stride_length_s=5, batch_size=8)
+     return transcription
+
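+ # Worked example of the normalization below (illustrative numbers): a 60-second answer
+ # containing 150 words is 150 WPM, which maps to (150 - 80) / (200 - 80) * 100 = 58.33%;
+ # anything at or below 80 WPM scores 0 and anything at or above 200 WPM scores 100.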
+ def calculate_speech_rate(text, audio_duration):
+     # Calculate the word count from the text
+     full_transcription = text['text']
+     word_count = len(full_transcription.split())
+     logging.info(f"Total word count: {word_count}")
+     # Calculate the speech rate in words per minute
+     speech_rate_wpm = word_count / audio_duration * 60
+
+     # Define minimum and maximum speech rates for normalization
+     min_speech_rate = 80  # words per minute (0%)
+     max_speech_rate = 200  # words per minute (100%)
+
+     # Normalize the speech rate to a percentage
+     if speech_rate_wpm < min_speech_rate:
+         speech_rate_percentage = 0
+     elif speech_rate_wpm > max_speech_rate:
+         speech_rate_percentage = 100
+     else:
+         speech_rate_percentage = ((speech_rate_wpm - min_speech_rate) /
+                                   (max_speech_rate - min_speech_rate)) * 100
+
+     return speech_rate_percentage
+
+ @app.post("/process_audio_master")
+ async def process_audio(
+     request: Request,
+     audio_file: UploadFile = File(...),
+     question: Optional[str] = Form("Tell me about yourself?"),
+     unitId: Optional[str] = Form(""),
+     moduleId: Optional[str] = Form(""),
+     questionId: Optional[str] = Form(""),
+     userProgressStatusId: Optional[str] = Form(""),
+     firstName: Optional[str] = Form(""),
+     lastName: Optional[str] = Form(""),
+     city: Optional[str] = Form(""),
+     college: Optional[str] = Form(""),
+     department: Optional[str] = Form(""),
+     course: Optional[str] = Form(""),
+     userId: Optional[str] = Form("")
+ ):
+     # Logging the raw request data
+     logging.info(f"Request headers: {request.headers}")
+     logging.info(f"Request query params: {request.query_params}")
+     logging.info(f"Request path params: {request.path_params}")
+     logging.info(f"Request cookies: {request.cookies}")
+
+     # Logging form data and file details
+     logging.info(f"Received audio file: {audio_file.filename}")
+     logging.info(f"Received question: {question}")
+     user = {
+         "firstName": firstName,
+         "lastName": lastName,
+         "city": city,
+         "college": college,
+         "department": department,
+         "course": course,
+     }
+     logging.info(user)
+     start_time = time.time()
+     try:
+         file_path = os.path.join(UPLOAD_FOLDER, audio_file.filename)
+         with open(file_path, "wb") as f:
+             f.write(await audio_file.read())
+
+         filename = audio_file.filename
+         file_size_bytes = os.path.getsize(file_path)
+         file_size_mb = file_size_bytes / (1024 * 1024)
+         logging.info(f"File Size: {file_size_mb:.2f} MB")
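+         # Uploaded video containers are converted with ffmpeg: -vn drops the video
+         # stream and libmp3lame encodes the audio track as MP3; MP3 uploads are
+         # re-encoded to 16-bit PCM WAV at 44.1 kHz stereo before analysis.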
+         if filename.endswith(('.mp4', '.avi', '.mkv', '.mov')):
+             try:
+                 output_audio_path = os.path.join(UPLOAD_FOLDER, 'output_audio.mp3')
+                 command = ['ffmpeg', '-i', file_path, '-vn', '-acodec', 'libmp3lame', '-y', output_audio_path]
+                 subprocess.run(command, check=True)
+                 audio_file_path = output_audio_path
+             except Exception as e:
+                 return JSONResponse(content={'error': f'Error converting video to audio: {str(e)}'}, status_code=500)
+         elif filename.endswith('.mp3'):
+             try:
+                 output_audio_path = os.path.join(UPLOAD_FOLDER, filename.rsplit('.', 1)[0] + '.wav')
+                 command = ['ffmpeg', '-i', file_path, '-acodec', 'pcm_s16le', '-ar', '44100', '-ac', '2', '-y', output_audio_path]
+                 subprocess.run(command, check=True)
+                 audio_file_path = output_audio_path
+             except Exception as e:
+                 return JSONResponse(content={'error': f'Error converting MP3 to WAV: {str(e)}'}, status_code=500)
+         else:
+             audio_file_path = file_path
+
+         audio = AudioSegment.from_file(audio_file_path)
+         audio_duration = audio.duration_seconds
+         logging.info(f"Audio Duration: {audio_duration}")
+
+         if audio_duration < 15:
+             data = {
+                 "code": 400,
+                 "messageCode": "1020",
+                 "message": "Recording Duration too short."
+             }
+             return JSONResponse(content=data)
+
+         transcribe_results = transcribe_audio(audio_file_path)
+
+         if not transcribe_results['text'].strip():
+             data = {
+                 "code": 400,
+                 "messageCode": "1020",
+                 "message": "No content in Audio"
+             }
+             return JSONResponse(content=data)
+         elif len(transcribe_results['text'].split()) < 20:
+             data = {
+                 "code": 400,
+                 "messageCode": "1020",
+                 "message": "Audio lacks content"
+             }
+             return JSONResponse(content=data)
+         else:
+             logging.info(f"transcribed content from the audio: {transcribe_results}")
+
+         transcribed_content = transcribe_results['text']
+         # logging.info(f"transcribed content from the audio: {transcribed_content}")
+         logging.info(f"Audio Duration: {audio_duration}")
+         # Trim audio to 10 seconds for emotion and fluency analysis
+         trimmed_audio = audio[:10000]  # First 10 seconds
+         trimmed_audio_path = os.path.join(UPLOAD_FOLDER, 'trimmed_audio.wav')
+         trimmed_audio.export(trimmed_audio_path, format="wav")
+
+         # Get the volume level of the audio
+         volume_db = get_audio_volume_level(audio)
+         volume_classification, score = classify_volume_level(volume_db)
+         emotionRecognition = {
+             "label": volume_classification, "score": score
+         }
+         fluency_results = fluency_pipe(trimmed_audio_path)
+         max_result = max(fluency_results, key=lambda x: x['score'])
+         fluencyLevel = {
+             "label": max_result['label'],
+             "score": round(max_result['score'], 2)
+         }
+         # fluencyLevel = {
+         #     "label": "Fluent",
+         #     "score": 0.99
+         # }
+
+         enhance_text_result = enhance_text(transcribe_results['text'], question)
+
+         relevance_rating = relevance_check(transcribe_results['text'], question)
+         contentRelevanceRating = relevance_rating['Relevance_rating']
+         contentRelevanceRatingReason = relevance_rating['reason']
+
+         vocabulary_level = vocabulary_text(transcribe_results['text'], question)
+         vocabularyRating = vocabulary_level['Vocabulary_rating']
+         vocabularyRatingReason = vocabulary_level['reason']
+
+         speech_rate = calculate_speech_rate(transcribe_results, audio_duration)
+
+         os.remove(file_path)
+         os.remove(trimmed_audio_path)
+         if audio_file_path != file_path:
+             os.remove(audio_file_path)
+
+         node_data = {
+             "aiToken": "25b0b507a8c467b4bd0d011df44fe72b6a4aa38ee60a868f3705745c45254aa6",
+             "userId": userId,
+             "userProgressStatusId": userProgressStatusId,
+             "unitId": unitId,
+             "moduleId": moduleId,
+             "questionId": questionId,
+             "aiResponseData": {
+                 'contentRelevanceRating': contentRelevanceRating,
+                 'contentRelevanceRatingReason': contentRelevanceRatingReason,
+                 'vocabularyRating': vocabularyRating,
+                 'vocabularyRatingReason': vocabularyRatingReason,
+                 "transcription": transcribe_results,
+                 "emotionRecognition": emotionRecognition,
+                 "fluencyLevel": fluencyLevel,
+                 "enhancedText": enhance_text_result,
+                 "speechRate": speech_rate,
+             }
+         }
+         data = {
+             "code": 200,
+             "messageCode": "1004",
+             "data": {
+                 'contentRelevanceRating': contentRelevanceRating,
+                 'contentRelevanceRatingReason': contentRelevanceRatingReason,
+                 'vocabularyRating': vocabularyRating,
+                 'vocabularyRatingReason': vocabularyRatingReason,
+                 "transcription": transcribe_results,
+                 "emotionRecognition": emotionRecognition,
+                 "fluencyLevel": fluencyLevel,
+                 "enhancedText": enhance_text_result,
+                 "speechRate": speech_rate,
+             },
+             "message": "Record(s) Found."
+         }
+         node_server_url = "https://megathil.shenll.com/processAudio/saveUserResult"
+         requests.post(node_server_url, json=node_data)
+         end_time = time.time()  # Record end time
+         elapsed_time = end_time - start_time  # Calculate elapsed time
+         logging.info(f"Total processing time: {elapsed_time:.2f} seconds")
+
+         return JSONResponse(content=data)
+
+     except Exception as e:
+         logging.error(f"Error processing request: {e}")
+         return JSONResponse(content={"error": str(e)}, status_code=500)
+
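+ # Example client call for the endpoint above (illustrative sketch; the host, port,
+ # file name, and userId value are assumptions, not part of this app):
+ #   import requests
+ #   with open("answer.mp3", "rb") as f:
+ #       resp = requests.post(
+ #           "http://localhost:8000/process_audio_master",
+ #           files={"audio_file": f},
+ #           data={"question": "Tell me about yourself?", "userId": "demo-user"},
+ #       )
+ #   print(resp.json())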
+ def get_audio_volume_level(audio: AudioSegment) -> float:
+     # Convert the audio data to a numpy array
+     samples = np.array(audio.get_array_of_samples(), dtype=np.float32)
+
+     # Calculate the root mean square (RMS) of the samples
+     rms = np.sqrt(np.mean(samples**2))
+
+     # Convert RMS to decibels (dB)
+     if rms == 0:
+         rms_db = -np.inf  # Use -infinity dB for silence
+     else:
+         rms_db = 20 * np.log10(rms)
+
+     return rms_db
+
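+ # Note: the RMS above is computed over raw integer sample values (e.g. up to ±32768 for
+ # 16-bit audio), so the resulting "dB" figure is positive for normal speech rather than
+ # a negative dBFS value; the 50 dB / 70 dB thresholds below should be read against that
+ # convention.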
+ def classify_volume_level(volume_db: float) -> tuple:
+     """Classify the audio volume level into 'High', 'Medium', or 'Low' with specific scores."""
+     if volume_db == -np.inf or volume_db < 50.0:
+         return "Low", 40.00
+     elif 50.0 <= volume_db < 70.0:
+         return "Medium", 60.00
+     else:
+         return "High", 100.00