ferferefer committed on
Commit 8df0f98 · verified · 1 Parent(s): d737669

Delete app.py

Files changed (1)
  1. app.py +0 -92
app.py DELETED
@@ -1,92 +0,0 @@
- import streamlit as st
- import sounddevice as sd
- import soundfile as sf
- import numpy as np
- import whisper
- import requests
- import json
- from datetime import datetime
- import os
- from dotenv import load_dotenv
-
- # Load environment variables
- load_dotenv()
-
- # Initialize Whisper model
- @st.cache_resource
- def load_whisper_model():
-     return whisper.load_model("base")
-
- # Mixtral API call function
- def get_clinical_notes(transcription):
-     API_URL = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1/v1/chat/completions"  # chat-completions route matching the messages payload below
-     headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_TOKEN')}"}
-
-     messages = [
-         {"role": "system", "content": "You are a medical assistant helping to generate clinical notes from doctor-patient conversations. Format the notes in a clear, professional structure."},
-         {"role": "user", "content": f"Generate clinical notes from this doctor-patient conversation: {transcription}"}
-     ]
-
-     payload = {
-         "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-         "messages": messages,
-         "max_tokens": 500,
-         "stream": False
-     }
-
-     response = requests.post(API_URL, headers=headers, json=payload)
-     return response.json()['choices'][0]['message']['content']
-
- # Main app
- st.title("Medical Conversation Transcriber")
- st.markdown("### Created by Dr. Fernando Ly")
- st.markdown("This application helps medical professionals automatically generate clinical notes from patient conversations.")
-
- # Recording parameters
- SAMPLE_RATE = 16000
- recording_duration = st.slider("Recording duration (seconds)", 10, 300, 60)
-
- if "audio_data" not in st.session_state:
-     st.session_state.audio_data = None
- if "transcription" not in st.session_state:
-     st.session_state.transcription = None
- if "clinical_notes" not in st.session_state:
-     st.session_state.clinical_notes = None
-
- # Record button
- if st.button("Start Recording"):
-     with st.spinner("Recording..."):
-         audio_data = sd.rec(int(recording_duration * SAMPLE_RATE),
-                             samplerate=SAMPLE_RATE,
-                             channels=1,
-                             dtype=np.float32)
-         sd.wait()
-         st.session_state.audio_data = audio_data
-
-         # Save audio temporarily
-         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-         filename = f"recording_{timestamp}.wav"
-         sf.write(filename, audio_data, SAMPLE_RATE)
-
-         # Transcribe audio
-         model = load_whisper_model()
-         st.session_state.transcription = model.transcribe(filename)["text"]
-
-         # Generate clinical notes
-         st.session_state.clinical_notes = get_clinical_notes(st.session_state.transcription)
-
-         # Clean up audio file
-         os.remove(filename)
-
- # Display results
- if st.session_state.transcription:
-     st.subheader("Transcription")
-     st.write(st.session_state.transcription)
-
- if st.session_state.clinical_notes:
-     st.subheader("Clinical Notes")
-     st.write(st.session_state.clinical_notes)
-
- # Footer
- st.markdown("---")
- st.markdown("*Note: This is an AI-assisted tool. Please review and verify all generated notes.*")