import streamlit as st
import google.generativeai as genai
import os
import time
from datetime import datetime
import re
from gtts import gTTS
import tempfile
import base64
from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
import av
import numpy as np
import cv2
import threading
import queue
# Page configuration
st.set_page_config(
page_title="CICE 2.0 Healthcare Assessment Tool",
page_icon="🏥",
layout="wide",
initial_sidebar_state="expanded"
)
# Custom CSS for styling
st.markdown("""
""", unsafe_allow_html=True)
# CICE Assessment Class
class CICE_Assessment:
def __init__(self, api_key):
if api_key:
genai.configure(api_key=api_key)
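# "gemini-2.0-flash-exp" is an experimental model name; if Google retires it,
# swap in a current stable model (e.g. "gemini-2.0-flash")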
self.model = genai.GenerativeModel("gemini-2.0-flash-exp")
else:
self.model = None
def analyze_video(self, video_file):
"""Analyze video using the 18-point CICE 2.0 assessment"""
if not self.model:
raise Exception("Please provide a valid Google API key")
# Upload video to Gemini
temp_path = None
try:
# Save uploaded file to a temporary location, keeping its original extension so
# genai.upload_file can infer the correct MIME type
suffix = os.path.splitext(getattr(video_file, 'name', ''))[1] or '.webm'
with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
tmp_file.write(video_file.read())
temp_path = tmp_file.name
# Upload to Gemini
uploaded_file = genai.upload_file(path=temp_path, display_name="healthcare_interaction")
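# The Files API processes uploads asynchronously; the file must reach the ACTIVE
# state before it can be passed to generate_content, hence the polling loop below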
# Wait for processing
max_wait = 300
wait_time = 0
while uploaded_file.state.name == "PROCESSING" and wait_time < max_wait:
time.sleep(3)
wait_time += 3
uploaded_file = genai.get_file(uploaded_file.name)
if uploaded_file.state.name == "FAILED":
raise Exception("Video processing failed")
if uploaded_file.state.name == "PROCESSING":
raise Exception("Video processing timed out after 5 minutes")
# The 18-point CICE 2.0 assessment prompt
prompt = """Analyze this healthcare team interaction video and provide a comprehensive assessment based on the CICE 2.0 instrument's 18 interprofessional competencies.
For EACH of the following 18 competencies, clearly state whether it was "OBSERVED" or "NOT OBSERVED" and provide specific examples with timestamps when possible:
1. IDENTIFIES FACTORS INFLUENCING HEALTH STATUS
2. IDENTIFIES TEAM GOALS FOR THE PATIENT
3. PRIORITIZES GOALS FOCUSED ON IMPROVING HEALTH OUTCOMES
4. VERBALIZES DISCIPLINE-SPECIFIC ROLE
5. OFFERS TO SEEK GUIDANCE FROM COLLEAGUES
6. COMMUNICATES ABOUT COST-EFFECTIVE AND TIMELY CARE
7. DIRECTS QUESTIONS TO OTHER HEALTH PROFESSIONALS BASED ON EXPERTISE
8. AVOIDS DISCIPLINE-SPECIFIC TERMINOLOGY
9. EXPLAINS DISCIPLINE-SPECIFIC TERMINOLOGY WHEN NECESSARY
10. COMMUNICATES ROLES AND RESPONSIBILITIES CLEARLY
11. ENGAGES IN ACTIVE LISTENING
12. SOLICITS AND ACKNOWLEDGES PERSPECTIVES
13. RECOGNIZES APPROPRIATE CONTRIBUTIONS
14. RESPECTFUL OF OTHER TEAM MEMBERS
15. COLLABORATIVELY WORKS THROUGH INTERPROFESSIONAL CONFLICTS
16. REFLECTS ON STRENGTHS OF TEAM INTERACTIONS
17. REFLECTS ON CHALLENGES OF TEAM INTERACTIONS
18. IDENTIFIES HOW TO IMPROVE TEAM EFFECTIVENESS
STRUCTURE YOUR RESPONSE AS FOLLOWS:
## OVERALL ASSESSMENT
Provide a brief overview of the team interaction quality and professionalism.
## DETAILED COMPETENCY EVALUATION
For each of the 18 competencies, format as:
Competency [number]: [name]
Status: [OBSERVED/NOT OBSERVED]
Evidence: [Specific examples from the video, or explanation of why it wasn't observed]
## STRENGTHS
List 3-5 key strengths observed in the team interaction
## AREAS FOR IMPROVEMENT
List 3-5 specific areas where the team could improve
## RECOMMENDATIONS
Provide 3-5 actionable recommendations for enhancing team collaboration and patient care
## FINAL SCORE
Competencies Observed: X/18
Overall Performance Level: [Exemplary/Proficient/Developing/Needs Improvement]"""
response = self.model.generate_content([uploaded_file, prompt])
return response.text
finally:
# Clean up temporary file
if temp_path and os.path.exists(temp_path):
os.unlink(temp_path)
def generate_audio_feedback(self, text):
"""Convert assessment text to audio feedback"""
try:
# Clean text for speech
clean_text = re.sub(r'[#*_\[\]()]', ' ', text)
clean_text = re.sub(r'\s+', ' ', clean_text)
clean_text = re.sub(r'[-•·]\s+', '', clean_text)
clean_text = clean_text.strip()
# Generate audio with gTTS
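# gTTS sends the text to Google's TTS endpoint and splits long input into chunks
# internally, so synthesizing a full report may take several seconds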
tts = gTTS(text=clean_text, lang='en', slow=False)
# Save to temporary file
temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
temp_audio.close()  # release the handle so gTTS can write to the path (needed on Windows)
tts.save(temp_audio.name)
# Read audio data
with open(temp_audio.name, 'rb') as f:
audio_data = f.read()
# Clean up temporary file
os.unlink(temp_audio.name)
return audio_data
except Exception as e:
st.error(f"โ ๏ธ Audio generation failed: {str(e)}")
return None
def parse_assessment_score(assessment_text):
"""Parse the assessment text to extract score"""
try:
# Look for a pattern like "X/18" in the text (re is already imported at module level);
# re.search below returns the first such occurrence, expected from the FINAL SCORE section
pattern = r'(\d+)/18'
match = re.search(pattern, assessment_text)
if match:
observed = int(match.group(1))
percentage = (observed / 18) * 100
if percentage >= 85:
level = "Exemplary"
color = "#059669"
elif percentage >= 70:
level = "Proficient"
color = "#0891b2"
elif percentage >= 50:
level = "Developing"
color = "#f59e0b"
else:
level = "Needs Improvement"
color = "#dc2626"
return observed, percentage, level, color
except Exception:
pass
return 0, 0, "Unknown", "#6b7280"
# Video recording state
if "recording" not in st.session_state:
st.session_state.recording = False
if "recorded_frames" not in st.session_state:
st.session_state.recorded_frames = []
if "frame_queue" not in st.session_state:
st.session_state.frame_queue = queue.Queue()
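# The queue hands frames from the WebRTC callback thread over to the main Streamlit script thread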
def video_frame_callback(frame):
"""Callback function to process video frames during recording"""
if st.session_state.recording:
img = frame.to_ndarray(format="bgr24")
st.session_state.frame_queue.put(img)
return frame
def save_recorded_video():
"""Save recorded frames to a video file"""
if st.session_state.recorded_frames:
# Create temporary file
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
temp_path = temp_file.name
temp_file.close()
# Video properties
height, width, _ = st.session_state.recorded_frames[0].shape
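# 'mp4v' (MPEG-4 Part 2) is available in most OpenCV builds; H.264 ('avc1')
# usually requires extra codecs, so it is not used here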
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = 20 # frames per second
# Create video writer
out = cv2.VideoWriter(temp_path, fourcc, fps, (width, height))
# Write frames
for frame in st.session_state.recorded_frames:
out.write(frame)
out.release()
return temp_path
return None
def main():
# Header
st.markdown("""
# 🏥 CICE 2.0 Healthcare Assessment Tool
Analyze healthcare team interactions using the 18-point CICE 2.0 interprofessional competency framework
""", unsafe_allow_html=True)
# Sidebar for API key
with st.sidebar:
st.header("๐ Configuration")
api_key = st.text_input(
"Google Gemini API Key",
type="password",
help="Enter your Google Gemini API key to analyze videos"
)
if api_key:
st.success("โ
API Key configured")
else:
st.warning("โ ๏ธ Please enter your Google Gemini API key")
st.markdown("---")
st.markdown("""
### 📋 CICE 2.0 Competencies
1. Health Status Factors
2. Team Goals Identification
3. Goal Prioritization
4. Role Verbalization
5. Seeking Guidance
6. Cost-Effective Communication
7. Expertise-Based Questions
8. Avoiding Jargon
9. Explaining Terminology
10. Clear Role Communication
11. Active Listening
12. Soliciting Perspectives
13. Recognizing Contributions
14. Team Respect
15. Conflict Resolution
16. Strength Reflection
17. Challenge Reflection
18. Improvement Identification
""")
# Main content
col1, col2 = st.columns([2, 1])
with col1:
st.header("๐น Record or Upload Healthcare Team Video")
# Tab selection for recording vs uploading
tab1, tab2 = st.tabs(["๐ฅ Record Video", "๐ Upload Video"])
with tab1:
st.subheader("Live Video Recording")
st.markdown("Click **Start Recording** to activate your webcam and begin recording a healthcare team interaction.")
# WebRTC configuration
rtc_configuration = RTCConfiguration({
"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
})
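# The public Google STUN server lets the browser discover its public address for the
# WebRTC connection; networks behind strict firewalls may additionally need a TURN server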
# Recording controls - only show when no completed recording exists
if not st.session_state.recorded_frames:
st.markdown("### ๐ฌ Recording Controls")
col_start, col_stop, col_status = st.columns([1, 1, 2])
with col_start:
if st.button("๐ด Start Recording", disabled=st.session_state.recording, type="primary", key="start_rec_btn"):
st.session_state.recording = True
st.session_state.recorded_frames = []
# Clear the queue
while not st.session_state.frame_queue.empty():
st.session_state.frame_queue.get()
st.rerun()
with col_stop:
if st.button("โน๏ธ Stop Recording", disabled=not st.session_state.recording, type="secondary", key="stop_rec_btn"):
st.session_state.recording = False
# Collect frames from queue
frames_collected = []
while not st.session_state.frame_queue.empty():
frame = st.session_state.frame_queue.get()
frames_collected.append(frame)
st.session_state.recorded_frames = frames_collected
st.success(f"Recording stopped! Captured {len(frames_collected)} frames.")
st.rerun()
with col_status:
if st.session_state.recording:
st.markdown("๐ด **Recording in progress...**")
st.markdown("*Click Stop Recording when finished*")
else:
st.markdown("โช **Ready to record**")
st.markdown("*Click Start Recording to begin*")
# Show recording completion status
if st.session_state.recorded_frames and not st.session_state.recording:
st.markdown("### โ
Recording Complete")
st.success(f"**Recording finished successfully!** Captured {len(st.session_state.recorded_frames)} frames")
st.info("๐น Your video is ready for analysis. Use the Process Video button below.")
# Only show WebRTC streamer when recording is active or about to start
if st.session_state.recording or not st.session_state.recorded_frames:
# WebRTC streamer
webrtc_ctx = webrtc_streamer(
key="video-recorder",
mode=WebRtcMode.SENDONLY,
rtc_configuration=rtc_configuration,
video_frame_callback=video_frame_callback,
media_stream_constraints={
"video": {
"width": {"ideal": 640, "max": 640},
"height": {"ideal": 480, "max": 480},
"frameRate": {"ideal": 20, "max": 30}
},
"audio": False # Disable audio for now
},
async_processing=True,
)
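# WebRtcMode.SENDONLY means the browser only sends its camera stream to the app;
# with audio disabled in the constraints above, the captured recording has no sound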
# Show process button only after recording is complete
if st.session_state.recorded_frames and not st.session_state.recording:
st.markdown("---")
st.subheader("๐ฏ Process Recorded Video")
col_process, col_restart = st.columns([2, 1])
with col_process:
if st.button("๐ Process Video with CICE 2.0", type="primary", use_container_width=True, key="process_recorded"):
if not api_key:
st.error("โ Please enter your Google Gemini API key in the sidebar first")
else:
with st.spinner("๐พ Converting recording to video file..."):
video_path = save_recorded_video()
if video_path:
# Read the saved video for analysis
with open(video_path, 'rb') as f:
video_bytes = f.read()
# Create a file-like object for analysis
class VideoFile:
def __init__(self, data, name):
self.data = data
self.name = name
def read(self):
return self.data
def seek(self, position):
pass
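# Minimal stand-in for Streamlit's UploadedFile: analyze_video only needs
# read(), seek() and a name attribute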
uploaded_file = VideoFile(video_bytes, "recorded_video.mp4")
# Proceed with analysis
analyze_video(uploaded_file, api_key)
# Clean up temporary file
os.unlink(video_path)
else:
st.error("โ Failed to save recording")
with col_restart:
if st.button("๐ New Recording", use_container_width=True, key="restart_recording"):
st.session_state.recorded_frames = []
st.session_state.recording = False
st.rerun()
with tab2:
st.subheader("Upload Video File")
uploaded_file = st.file_uploader(
"Choose a video file",
type=['mp4', 'webm', 'avi', 'mov'],
help="Upload a video of healthcare team interaction for CICE 2.0 assessment"
)
if uploaded_file is not None:
st.success(f"โ
Video uploaded: {uploaded_file.name}")
# Display video
st.video(uploaded_file)
# Analyze button
if st.button("๐ Analyze with CICE 2.0", type="primary"):
if not api_key:
st.error("โ Please enter your Google Gemini API key in the sidebar")
else:
analyze_video(uploaded_file, api_key)
with col2:
st.header("โน๏ธ About CICE 2.0")
st.markdown("""
The **Collaborative Interprofessional Team Environment (CICE) 2.0** instrument evaluates healthcare team interactions across 18 key competencies.
### 🎯 Purpose
- Assess interprofessional collaboration
- Identify team strengths
- Highlight improvement areas
- Enhance patient care quality
### 📊 Scoring Levels
- **Exemplary** (85-100%): Outstanding collaboration
- **Proficient** (70-84%): Good teamwork
- **Developing** (50-69%): Needs improvement
- **Needs Improvement** (<50%): Significant gaps
### 🚀 Getting Started
1. Enter your Google Gemini API key
2. Upload a healthcare team video
3. Click "Analyze with CICE 2.0"
4. Review detailed results
5. Download assessment report
""")
def analyze_video(uploaded_file, api_key):
"""Analyze uploaded video with CICE 2.0 assessment"""
try:
assessor = CICE_Assessment(api_key)
with st.spinner("๐ค Analyzing video with CICE 2.0 framework... This may take 1-2 minutes"):
# Reset file pointer
uploaded_file.seek(0)
assessment_result = assessor.analyze_video(uploaded_file)
# Parse score
observed, percentage, level, color = parse_assessment_score(assessment_result)
# Display summary
st.markdown(f"""
CICE 2.0 Assessment Results
{observed}/18
Competencies Observed
{percentage:.0f}%
Overall Score
Performance Level: {level}
""", unsafe_allow_html=True)
# Display detailed assessment
st.markdown("### ๐ Detailed Assessment Report")
st.write(assessment_result)
# Generate audio feedback
with st.spinner("๐ Generating audio feedback..."):
audio_data = assessor.generate_audio_feedback(assessment_result)
# Create formatted text report
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
formatted_report = f"""CICE 2.0 Healthcare Team Interaction Assessment
{'='*60}
Assessment Date: {timestamp}
Video File: {getattr(uploaded_file, 'name', 'recorded_video.mp4')}
Overall Score: {observed}/18 ({percentage:.1f}%)
Performance Level: {level}
{'='*60}
{assessment_result}
{'='*60}
Generated by CICE 2.0 Healthcare Assessment Tool
Powered by Google Gemini AI
{'='*60}"""
# Download options
st.markdown("### ๐ฅ Download Options")
col_text, col_audio = st.columns(2)
with col_text:
st.download_button(
label="๐ Download Text Report",
data=formatted_report,
file_name=f"cice_assessment_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt",
mime="text/plain",
help="Download the complete assessment report as a text file"
)
with col_audio:
if audio_data:
st.download_button(
label="๐ Download Audio Report",
data=audio_data,
file_name=f"cice_assessment_audio_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp3",
mime="audio/mpeg",
help="Download the assessment report as an audio file"
)
# Also display audio player
st.audio(audio_data, format='audio/mp3')
st.caption("๐ง Listen to the assessment report above")
else:
st.error("โ Audio generation failed")
# Interactive Q&A Section
st.markdown("### ๐ฌ Ask Questions About the Assessment")
st.markdown("You can ask specific questions about the CICE competencies and assessment results.")
# Question input
question = st.text_input(
"โ Your question:",
placeholder="e.g., 'Was active listening demonstrated?' or 'How did the team handle conflicts?'",
help="Ask specific questions about the competencies or assessment results"
)
if question and st.button("๐ค Get Answer"):
try:
with st.spinner("๐ค Analyzing your question..."):
qa_prompt = f"""Based on the CICE 2.0 assessment of this healthcare team video,
please answer this specific question: {question}
Assessment Results:
{assessment_result}
Please provide a detailed answer referring to the relevant competencies from the 18-point CICE framework."""
# Reset file pointer for Q&A
uploaded_file.seek(0)
temp_path = None
try:
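# The video is uploaded to Gemini again for each question; caching the File object
# returned by the initial analysis could avoid the repeated upload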
# Re-upload file for Q&A
# Keep the original extension so the re-uploaded file gets the right MIME type
qa_suffix = os.path.splitext(getattr(uploaded_file, 'name', ''))[1] or '.webm'
with tempfile.NamedTemporaryFile(delete=False, suffix=qa_suffix) as tmp_file:
tmp_file.write(uploaded_file.read())
temp_path = tmp_file.name
uploaded_file_qa = genai.upload_file(path=temp_path, display_name="healthcare_interaction_qa")
# Wait for processing
max_wait = 60
wait_time = 0
while uploaded_file_qa.state.name == "PROCESSING" and wait_time < max_wait:
time.sleep(2)
wait_time += 2
uploaded_file_qa = genai.get_file(uploaded_file_qa.name)
if uploaded_file_qa.state.name == "FAILED":
raise Exception("Video processing failed for Q&A")
qa_response = assessor.model.generate_content([uploaded_file_qa, qa_prompt])
st.markdown("#### ๐ Answer:")
st.write(qa_response.text)
finally:
if temp_path and os.path.exists(temp_path):
os.unlink(temp_path)
except Exception as e:
st.error(f"โ Error processing question: {str(e)}")
# Example questions
st.markdown("**Example questions:**")
st.markdown("""
- Was active listening demonstrated by the team?
- How did the team handle interprofessional conflicts?
- What specific improvements are recommended?
- Which competencies were most lacking?
- How well did team members communicate their roles?
""")
except Exception as e:
st.error(f"โ Error during assessment: {str(e)}")
if __name__ == "__main__":
main()