Update src/app.py
src/app.py  +2 -2
@@ -31,7 +31,7 @@ load_dotenv()
 # else: # Assume hosted on Hugging Face Spaces
 BASE_DIR = Path(".").resolve()
 
-LLM_CONFIG_FILE = BASE_DIR / "src/configs/llm/openai-gpt-
+LLM_CONFIG_FILE = BASE_DIR / "src/configs/llm/openai-gpt-4o-mini.yaml"
 RESUME_PARSER_CONFIG_FILE = BASE_DIR / "src/configs/parser/llamaparse_en.yaml"
 OUTPUT_AUDIO_FILE_EMPTY = BASE_DIR / "src/output/audio_output.wav"
 OUTPUT_REPORT_FILE_EMPTY = BASE_DIR / "src/output/report.docx"
@@ -107,7 +107,7 @@ class GradioInterface:
         return audio_text
 
     def analyze_emotions(self, video_path: str) -> Optional[str]:
-        frames = sample_frames(video_path, sample_rate=
+        frames = sample_frames(video_path, sample_rate=24)
         emotions = EmotionRecognition.detect_face_emotions(frames)
         emotions_dict = EmotionRecognition.process_emotions(emotions)
         conf_score = emotions_dict["conf"]
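For context, the first hunk points the app at an openai-gpt-4o-mini YAML config. How src/app.py actually consumes that file is not part of this commit, so the snippet below is only a minimal sketch of reading such a path constant, assuming PyYAML is available; load_llm_config is a hypothetical helper, not a function from the repository.

    from pathlib import Path

    import yaml  # assumption: the project's .yaml configs are read with PyYAML

    BASE_DIR = Path(".").resolve()
    LLM_CONFIG_FILE = BASE_DIR / "src/configs/llm/openai-gpt-4o-mini.yaml"

    def load_llm_config(path: Path) -> dict:
        # Hypothetical helper; the loader used by src/app.py is not shown in this diff.
        with path.open("r", encoding="utf-8") as f:
            return yaml.safe_load(f)

    llm_config = load_llm_config(LLM_CONFIG_FILE)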
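The second hunk restores the sample_rate argument passed to sample_frames. Its implementation is not shown in this commit; the following is a minimal sketch of a frame-sampling helper with that signature, assuming OpenCV (cv2) decodes the video and that sample_rate means "keep every Nth frame". Under that reading, sample_rate=24 on 24 fps footage would keep roughly one frame per second for the downstream EmotionRecognition calls.

    from typing import List

    import cv2  # assumption: OpenCV handles video decoding; the real helper is not in this diff
    import numpy as np

    def sample_frames(video_path: str, sample_rate: int = 24) -> List[np.ndarray]:
        # Hypothetical sketch: keep one frame out of every `sample_rate` decoded frames.
        frames: List[np.ndarray] = []
        capture = cv2.VideoCapture(video_path)
        index = 0
        while True:
            ok, frame = capture.read()
            if not ok:
                break
            if index % sample_rate == 0:
                frames.append(frame)
            index += 1
        capture.release()
        return frames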