import gradio as gr
import pandas as pd
import numpy as np
import io, base64, datetime, re
from collections import Counter
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def get_first_row_totals(df, group_column):
    """Get the GenAI efficiency hours from the first row of each group."""
    first_row_totals = {}
    for group_value in df[group_column].unique():
        group_rows = df[df[group_column] == group_value]
        if not group_rows.empty:
            first_row_totals[group_value] = group_rows.iloc[0]['GenAI Efficiency (Log time in hours)']
    return first_row_totals
def create_unique_work_items(df):
    """Create unique work identifiers to avoid double counting."""
    analysis_df = df.copy()
    if 'Key' in analysis_df.columns and 'Project' in analysis_df.columns:
        analysis_df['UniqueWorkID'] = analysis_df.apply(lambda row: f"{row['Project']}_{row['Key']}", axis=1)
    elif all(col in analysis_df.columns for col in ['Project', 'Date', 'Worklog', 'User']):
        # 'Project' must be part of this check too, since the fallback ID also reads it
        analysis_df['UniqueWorkID'] = analysis_df.apply(lambda row: f"{row['Project']}_{row['Date']}_{row['Worklog']}_{row['User']}", axis=1)
    return analysis_df
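# Illustrative (assumed column values): a row with Project "Apollo" and Key "AP-101"
# gets UniqueWorkID "Apollo_AP-101", so repeated worklog rows for the same ticket
# collapse into one work item when deduplicated downstream.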
def calculate_champion_score(descriptions, project_data=None):
    """Calculate champion score based on Tools (20%), Use-case (30%), Prompt (30%), Outcome (20%)."""
    if not descriptions or not any(pd.notnull(desc) for desc in descriptions):
        return 0
    # Filter and join descriptions
    valid_descriptions = [desc for desc in descriptions if pd.notnull(desc) and str(desc).strip()]
    if not valid_descriptions:
        return 0
    combined_desc = "\n".join(str(desc) for desc in valid_descriptions)
    combined_desc_lower = combined_desc.lower()
    # Tools score (20%)
    tools_score = 0
    ai_tools = ['gpt', 'chatgpt', 'claude', 'gemini', 'copilot', 'dall-e', 'midjourney', 'stable diffusion',
                'hugging face', 'llama', 'mistral', 'bard', 'anthropic']
    tools_mentioned = sum(1 for tool in ai_tools if re.search(r'\b' + re.escape(tool) + r'\b', combined_desc_lower))
    if tools_mentioned == 1:
        tools_score = 10
    elif tools_mentioned >= 2:
        tools_score = 15
    # Escape the dot so "gpt-3.5" is matched literally
    if re.search(r'\b(gpt-4|gpt-3\.5|claude-2|claude-instant|gemini pro)\b', combined_desc_lower):
        tools_score += 5
    tools_score = min(tools_score, 20)
    # Use-case score (30%)
    use_case_score = 0
    use_case_keywords = {
        'code generation': ['code', 'coding', 'script', 'programming', 'develop'],
        'content creation': ['content', 'write', 'writing', 'draft', 'article'],
        'data analysis': ['data', 'analysis', 'analyze', 'metrics', 'statistics'],
        'problem solving': ['problem', 'solution', 'solve', 'issue', 'challenge'],
        'summarization': ['summary', 'summarize', 'summarization', 'extract'],
        'research': ['research', 'study', 'investigate', 'literature', 'information'],
        'automation': ['automate', 'automation', 'workflow', 'process']
    }
    use_cases_found = sum(1 for _, keywords in use_case_keywords.items()
                          if any(re.search(r'\b' + re.escape(keyword) + r'\b', combined_desc_lower) for keyword in keywords))
    use_case_score += min(use_cases_found * 5, 15)
    if re.search(r'\bfor\s+(a|an|the)\s+\w+', combined_desc_lower) or re.search(r'\bto\s+\w+\s+the\s+\w+', combined_desc_lower):
        use_case_score += 5
    domain_terms = ['frontend', 'backend', 'api', 'database', 'ui', 'ux', 'algorithm', 'component', 'feature']
    if any(re.search(r'\b' + re.escape(term) + r'\b', combined_desc_lower) for term in domain_terms):
        use_case_score += 5
    if re.search(r'\bproject\b|\btask\b|\bticket\b|\bissue\b|\bstory\b', combined_desc_lower):
        use_case_score += 5
    use_case_score = min(use_case_score, 30)
    # Prompt quality score (30%)
    prompt_score = 0
    if len(combined_desc) > 500:
        prompt_score += 10
    elif len(combined_desc) > 200:
        prompt_score += 5
    if re.search(r'".*?"|\bprompt\b|\'.*?\'|\bassist\b|\bcreate\b|\bgenerate\b', combined_desc_lower):
        prompt_score += 10
    prompt_techniques = ['step by step', 'chain of thought', 'few-shot', 'zero-shot', 'example']
    techniques_found = sum(1 for technique in prompt_techniques
                           if re.search(r'\b' + re.escape(technique) + r'\b', combined_desc_lower))
    prompt_score += min(techniques_found * 2, 10)
    prompt_score = min(prompt_score, 30)
    # Outcome/iteration score (20%)
    outcome_score = 0
    outcome_keywords = ['result', 'output', 'generated', 'created', 'produced', 'improved']
    outcomes_found = sum(1 for keyword in outcome_keywords
                         if re.search(r'\b' + re.escape(keyword) + r'\b', combined_desc_lower))
    outcome_score += min(outcomes_found * 2, 10)
    iteration_keywords = ['iteration', 'refine', 'revise', 'update', 'modify', 'enhance', 'feedback']
    iterations_found = sum(1 for keyword in iteration_keywords
                           if re.search(r'\b' + re.escape(keyword) + r'\b', combined_desc_lower))
    outcome_score += min(iterations_found * 2, 5)
    if re.search(r'\d+%|\d+\s*hours|\d+\s*minutes|reduced by|increased by', combined_desc_lower):
        outcome_score += 5
    outcome_score = min(outcome_score, 20)
    return tools_score + use_case_score + prompt_score + outcome_score
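# Illustrative call (hypothetical description text):
#   calculate_champion_score(["Used ChatGPT to generate code for the backend API"])
# returns 35: 10 (one tool) + 15 (code-generation keyword, a "for the ..." purpose
# clause, and domain terms) + 10 (prompt verb "generate") + 0 (no outcome language).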
def process_genai_data(df):
    """Process GenAI data at the user level, ensuring no duplication of hours."""
    # Create unique users DataFrame
    unique_users = df['User'].drop_duplicates().reset_index(drop=True)
    result_df = pd.DataFrame(unique_users, columns=['User'])
    # Get descriptions for each user (join of an empty sequence yields "")
    result_df['GenAI_Descriptions'] = result_df['User'].apply(
        lambda user: "\n".join("- " + str(desc) for desc in df[df['User'] == user]['GenAI use case description'].dropna().unique())
    )
    # Calculate metrics using unique combinations
    def get_unique_metric_sum(user, metric_col):
        user_data = df[df['User'] == user].copy()
        if all(col in user_data.columns for col in ['Project', 'Key']):
            user_data['UniqueID'] = user_data.apply(lambda row: f"{row['Project']}_{row['Key']}", axis=1)
            return user_data.drop_duplicates('UniqueID')[metric_col].sum()
        elif all(col in user_data.columns for col in ['Date', 'Project', 'Worklog']):
            user_data['UniqueID'] = user_data.apply(lambda row: f"{row['Project']}_{row['Date']}_{row['Worklog']}", axis=1)
            return user_data.drop_duplicates('UniqueID')[metric_col].sum()
        return user_data[metric_col].sum()
    result_df['GenAI_Efficiency'] = result_df['User'].apply(lambda user: get_unique_metric_sum(user, 'GenAI Efficiency (Log time in hours)'))
    if 'Logged' in df.columns:
        result_df['Total_Logged_Hours'] = result_df['User'].apply(lambda user: get_unique_metric_sum(user, 'Logged'))
    if 'Required' in df.columns:
        result_df['Total_Required_Hours'] = result_df['User'].apply(lambda user: get_unique_metric_sum(user, 'Required'))
    # Calculate utilization percentage
    if 'Total_Logged_Hours' in result_df.columns and 'Total_Required_Hours' in result_df.columns:
        result_df['Utilization_Percentage'] = (result_df['Total_Logged_Hours'] / result_df['Total_Required_Hours'] * 100).round(2)
    # Get date range for each user (the walrus binding is evaluated in the condition first)
    if 'Date' in df.columns:
        result_df['Date_Range'] = result_df['User'].apply(
            lambda user: f"{min(dates)} to {max(dates)}"
            if len(dates := df[df['User'] == user]['Date'].dropna()) > 0 else "N/A"
        )
    # Add champion score for each user
    result_df['Description_Quality_Score'] = result_df['GenAI_Descriptions'].apply(
        lambda desc: calculate_champion_score([desc]) if isinstance(desc, str) and desc.strip() else 0
    )
    # Get project and category data if available
    if 'Project' in df.columns:
        result_df['Projects'] = result_df['User'].apply(
            lambda user: list(df[df['User'] == user]['Project'].dropna().unique())
        )
    if 'Project Category' in df.columns:
        result_df['Project_Categories'] = result_df['User'].apply(
            lambda user: list(df[df['User'] == user]['Project Category'].dropna().unique())
        )
    return result_df
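# Shape sketch (assumed sample data): given worklog rows with columns
#   User, Project, Key, Logged, Required, GenAI use case description,
#   GenAI Efficiency (Log time in hours)
# the result has one row per user with GenAI_Descriptions, GenAI_Efficiency,
# Total_Logged_Hours, Total_Required_Hours, Utilization_Percentage, and a
# Description_Quality_Score between 0 and 100.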
def analyze_projects_by_genai_hours(df, exclude_qed42_global=False):
    """Analyze projects by GenAI hours with quality metrics."""
    if 'Project' not in df.columns:
        return None
    # Get first row totals for each project
    project_totals = get_first_row_totals(df, 'Project')
    # Calculate project data using unique work items
    analysis_df = create_unique_work_items(df)
    # Filter out QED42 Global projects if requested
    if exclude_qed42_global:
        analysis_df = analysis_df[~analysis_df['Project'].str.contains('QED42 Global', case=False, na=False)]
        project_totals = {k: v for k, v in project_totals.items() if 'qed42 global' not in k.lower()}
    projects_data = []
    for project in analysis_df['Project'].unique():
        if project in project_totals:
            total_hours = project_totals[project]
            user_count = len(analysis_df[analysis_df['Project'] == project]['User'].unique())
            # Get project category if available
            project_category = 'Unknown'
            if 'Project Category' in analysis_df.columns:
                project_category_series = analysis_df[analysis_df['Project'] == project]['Project Category'].dropna()
                if not project_category_series.empty:
                    project_category = project_category_series.iloc[0]
            # Get best description for this project
            project_descriptions = analysis_df[analysis_df['Project'] == project]['GenAI use case description'].dropna().tolist()
            best_description = max(project_descriptions, key=lambda x: len(str(x))) if project_descriptions else ""
            champion_score = calculate_champion_score(project_descriptions)
            projects_data.append({
                'Project': project,
                'Total_GenAI_Hours': total_hours,
                'User_Count': user_count,
                'Project Category': project_category,
                'Best_Description': best_description,
                'Champion_Score': champion_score
            })
    # Create DataFrame from projects data
    project_hours = pd.DataFrame(projects_data) if projects_data else pd.DataFrame()
    # Add combined scores: hours weighted 60%, description quality 40%
    if not project_hours.empty:
        max_hours = project_hours['Total_GenAI_Hours'].max() or 1
        max_quality = project_hours['Champion_Score'].max() or 1
        project_hours['Hours_Score'] = (project_hours['Total_GenAI_Hours'] / max_hours) * 100
        project_hours['Quality_Score_Normalized'] = (project_hours['Champion_Score'] / max_quality) * 100
        project_hours['Combined_Score'] = (project_hours['Hours_Score'] * 0.6) + (project_hours['Quality_Score_Normalized'] * 0.4)
        project_hours = project_hours.sort_values('Combined_Score', ascending=False)
    return project_hours
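# Worked example of the 60/40 blend (hypothetical numbers): a project at 80% of
# the max hours and 50% of the max champion score gets 0.6*80 + 0.4*50 = 68.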
def extract_ai_tools_from_descriptions(df):
    """Extract AI tools mentioned in descriptions."""
    ai_tools = [
        'chatgpt', 'gpt-4', 'gpt-3', 'gpt', 'openai', 'claude', 'anthropic',
        'gemini', 'bard', 'google ai', 'copilot', 'github copilot', 'microsoft copilot',
        'dall-e', 'midjourney', 'stable diffusion', 'hugging face', 'transformers',
        'bert', 'llama', 'mistral', 'tensorflow', 'pytorch', 'ml',
        'jupyter', 'colab', 'langchain', 'llm', 'rag'
    ]
    tool_mapping = {
        'gpt': 'ChatGPT/GPT', 'gpt-3': 'ChatGPT/GPT', 'gpt-4': 'ChatGPT/GPT', 'chatgpt': 'ChatGPT/GPT',
        'openai': 'OpenAI', 'claude': 'Claude', 'anthropic': 'Claude',
        'gemini': 'Google AI', 'bard': 'Google AI', 'google ai': 'Google AI',
        'copilot': 'GitHub Copilot', 'github copilot': 'GitHub Copilot'
    }
    all_descriptions = df['GenAI use case description'].dropna()
    if all_descriptions.empty:
        return Counter()
    all_descriptions_text = " ".join(all_descriptions.astype(str)).lower()
    tool_counts = Counter()
    for tool in ai_tools:
        count = len(re.findall(r'\b' + re.escape(tool) + r'\b', all_descriptions_text))
        if count > 0:
            normalized_tool = tool_mapping.get(tool, tool)
            tool_counts[normalized_tool] += count
    return tool_counts
def extract_use_cases_from_descriptions(df):
    """Analyze use cases in GenAI descriptions."""
    use_case_keywords = {
        'Code Generation': ['code', 'coding', 'programming', 'script', 'develop', 'algorithm'],
        'Content Creation': ['content', 'write', 'writing', 'draft', 'article', 'blog'],
        'Data Analysis': ['data', 'analysis', 'analyze', 'analytics', 'statistics', 'insights'],
        'Documentation': ['document', 'documentation', 'manual', 'guide', 'readme'],
        'Research': ['research', 'study', 'investigate', 'explore', 'literature'],
        'Summarization': ['summary', 'summarize', 'summarization', 'extract'],
        'Translation': ['translate', 'translation', 'language', 'localize']
    }
    descriptions = df['GenAI use case description'].dropna()
    if descriptions.empty:
        return Counter()
    descriptions_list = descriptions.astype(str).tolist()
    use_case_counts = Counter()
    for description in descriptions_list:
        description_lower = description.lower()
        for use_case, keywords in use_case_keywords.items():
            if any(re.search(r'\b' + re.escape(keyword) + r'\b', description_lower) for keyword in keywords):
                use_case_counts[use_case] += 1
    return use_case_counts
def create_download_excel(df):
    """Create an Excel file (as bytes) for download."""
    output = io.BytesIO()
    with pd.ExcelWriter(output, engine='openpyxl') as writer:
        df.to_excel(writer, index=False, sheet_name='Processed Data')
        # Add summary sheet
        if not df.empty:
            summary = pd.DataFrame({
                'Metric': ['Total Users', 'Average GenAI Efficiency (hours)', 'Average Utilization (%)',
                           'Top GenAI User', 'Top Quality Score User'],
                'Value': [
                    len(df),
                    round(df['GenAI_Efficiency'].mean(), 2) if 'GenAI_Efficiency' in df.columns else 'N/A',
                    round(df['Utilization_Percentage'].mean(), 2) if 'Utilization_Percentage' in df.columns else 'N/A',
                    df.loc[df['GenAI_Efficiency'].idxmax(), 'User'] if 'GenAI_Efficiency' in df.columns and not df['GenAI_Efficiency'].isna().all() else 'N/A',
                    df.loc[df['Description_Quality_Score'].idxmax(), 'User'] if 'Description_Quality_Score' in df.columns and not df['Description_Quality_Score'].isna().all() else 'N/A'
                ]
            })
            summary.to_excel(writer, index=False, sheet_name='Summary')
    return output.getvalue()
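# Usage sketch (assumed call site): the returned bytes can be written straight
# to disk, e.g. open("report.xlsx", "wb").write(create_download_excel(result_df)),
# which is what the processing handlers below do with a timestamped filename.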
def create_visualizations(result_df, project_analysis, ai_tool_counts, use_case_counts):
    """Create visualization plots."""
    plots = []
    # 1. GenAI Efficiency by User
    if 'GenAI_Efficiency' in result_df.columns and not result_df.empty:
        sorted_df = result_df.sort_values('GenAI_Efficiency', ascending=False).head(10)
        fig1 = px.bar(
            sorted_df,
            x='User',
            y='GenAI_Efficiency',
            title='Top 10 Users by GenAI Efficiency Hours',
            color='GenAI_Efficiency',
            color_continuous_scale='Viridis'
        )
        fig1.update_layout(xaxis_tickangle=-45)
        plots.append(fig1)
    # 2. Project Analysis
    if project_analysis is not None and not project_analysis.empty:
        top_projects = project_analysis.head(8)
        fig2 = px.bar(
            top_projects,
            x='Project',
            y='Total_GenAI_Hours',
            title='Top Projects by GenAI Hours',
            color='Champion_Score',
            color_continuous_scale='RdYlGn'
        )
        fig2.update_layout(xaxis_tickangle=-45)
        plots.append(fig2)
    # 3. AI Tools Usage
    if ai_tool_counts:
        ai_tools_df = pd.DataFrame({
            'Tool': list(ai_tool_counts.keys()),
            'Mentions': list(ai_tool_counts.values())
        }).sort_values('Mentions', ascending=False).head(8)
        fig3 = px.bar(
            ai_tools_df,
            x='Tool',
            y='Mentions',
            title='Most Mentioned AI Tools',
            color='Mentions',
            color_continuous_scale='Blues'
        )
        plots.append(fig3)
    # 4. Use Cases Distribution
    if use_case_counts:
        use_cases_df = pd.DataFrame({
            'Use Case': list(use_case_counts.keys()),
            'Count': list(use_case_counts.values())
        }).sort_values('Count', ascending=False)
        fig4 = px.pie(
            use_cases_df,
            names='Use Case',
            values='Count',
            title='GenAI Use Cases Distribution'
        )
        plots.append(fig4)
    # 5. Quality Score Distribution
    if 'Description_Quality_Score' in result_df.columns and not result_df.empty:
        fig5 = px.histogram(
            result_df,
            x='Description_Quality_Score',
            title='Distribution of Champion Scores',
            nbins=20,
            color_discrete_sequence=['#2E86AB']
        )
        plots.append(fig5)
    # 6. Utilization Analysis
    if 'Utilization_Percentage' in result_df.columns and not result_df.empty:
        sorted_util = result_df.sort_values('Utilization_Percentage', ascending=False).head(10)
        fig6 = px.bar(
            sorted_util,
            x='User',
            y='Utilization_Percentage',
            title='Top 10 Users by Utilization Percentage',
            color='Utilization_Percentage',
            color_continuous_scale='RdYlGn'
        )
        fig6.update_layout(xaxis_tickangle=-45)
        plots.append(fig6)
    return plots
def process_file(file):
    """Standalone processing entry point (the Gradio UI below uses process_and_update instead)."""
    if file is None:
        return None, "Please upload a file", None, None, None, None, None, None
    try:
        # gr.File with type="filepath" passes a plain string path; older versions pass a tempfile object
        path = file if isinstance(file, str) else file.name
        # Read the file
        if path.endswith('.csv'):
            df = pd.read_csv(path)
        else:
            df = pd.read_excel(path)
        # Check required columns
        required_columns = ['User', 'GenAI use case description', 'GenAI Efficiency (Log time in hours)']
        missing_columns = [col for col in required_columns if col not in df.columns]
        if missing_columns:
            return None, f"Missing required columns: {', '.join(missing_columns)}", None, None, None, None, None, None
        # Process the data
        result_df = process_genai_data(df)
        project_analysis = analyze_projects_by_genai_hours(df)
        ai_tool_counts = extract_ai_tools_from_descriptions(df)
        use_case_counts = extract_use_cases_from_descriptions(df)
        # Create Excel download
        excel_data = create_download_excel(result_df)
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        excel_filename = f"genai_processed_data_{timestamp}.xlsx"
        # Save Excel file temporarily
        with open(excel_filename, 'wb') as f:
            f.write(excel_data)
        # Create visualizations
        plots = create_visualizations(result_df, project_analysis, ai_tool_counts, use_case_counts)
        # Create summary statistics
        summary_stats = create_summary_stats(result_df, project_analysis, ai_tool_counts, use_case_counts)
        # Create insights text
        insights = create_insights_text(result_df, project_analysis, ai_tool_counts, use_case_counts)
        # Return a fixed 8-slot shape, matching the error returns above
        return (
            result_df,
            "Processing completed successfully!",
            excel_filename,
            summary_stats,
            insights,
            plots[0] if len(plots) > 0 else None,
            plots[1] if len(plots) > 1 else None,
            plots[2] if len(plots) > 2 else None
        )
    except Exception as e:
        return None, f"Error processing file: {str(e)}", None, None, None, None, None, None
def create_summary_stats(result_df, project_analysis, ai_tool_counts, use_case_counts):
    """Create summary statistics."""
    if result_df is None or result_df.empty:
        return "No data to analyze"
    stats = []
    stats.append("**📊 Summary Statistics**")
    stats.append(f"• Total Users: {len(result_df)}")
    if 'GenAI_Efficiency' in result_df.columns:
        avg_efficiency = result_df['GenAI_Efficiency'].mean()
        total_efficiency = result_df['GenAI_Efficiency'].sum()
        stats.append(f"• Total GenAI Hours: {round(total_efficiency, 2)}")
        stats.append(f"• Average GenAI Efficiency: {round(avg_efficiency, 2)} hours")
    if 'Utilization_Percentage' in result_df.columns:
        avg_util = result_df['Utilization_Percentage'].mean()
        stats.append(f"• Average Utilization: {round(avg_util, 2)}%")
    if 'Description_Quality_Score' in result_df.columns:
        avg_quality = result_df['Description_Quality_Score'].mean()
        stats.append(f"• Average Champion Score: {round(avg_quality, 1)}/100")
    if ai_tool_counts:
        top_tool = max(ai_tool_counts.items(), key=lambda x: x[1])[0]
        stats.append(f"• Most Used AI Tool: {top_tool}")
    if use_case_counts:
        top_use_case = max(use_case_counts.items(), key=lambda x: x[1])[0]
        stats.append(f"• Top Use Case: {top_use_case}")
    if project_analysis is not None and not project_analysis.empty:
        top_project = project_analysis.iloc[0]
        stats.append(f"• Top Project: {top_project['Project']} ({round(top_project['Total_GenAI_Hours'], 2)} hours)")
    return "\n".join(stats)
def create_insights_text(result_df, project_analysis, ai_tool_counts, use_case_counts):
    """Create insights text."""
    if result_df is None or result_df.empty:
        return "No insights available"
    insights = []
    insights.append("**🔍 Key Insights**")
    # Champion user
    if 'GenAI_Efficiency' in result_df.columns and 'Description_Quality_Score' in result_df.columns:
        # Calculate combined score for users (note: this adds score columns to
        # result_df in place, so they also appear in the displayed/downloaded data)
        max_hours = result_df['GenAI_Efficiency'].max() or 1
        max_quality = result_df['Description_Quality_Score'].max() or 1
        result_df['Hours_Score'] = (result_df['GenAI_Efficiency'] / max_hours) * 100
        result_df['Quality_Score_Normalized'] = (result_df['Description_Quality_Score'] / max_quality) * 100
        result_df['Combined_Score'] = (result_df['Hours_Score'] * 0.6) + (result_df['Quality_Score_Normalized'] * 0.4)
        champion_user = result_df.loc[result_df['Combined_Score'].idxmax()]
        insights.append(f"🏆 **Champion User:** {champion_user['User']}")
        insights.append(f"   - GenAI Hours: {round(champion_user['GenAI_Efficiency'], 2)}")
        insights.append(f"   - Champion Score: {round(champion_user['Description_Quality_Score'], 1)}/100")
        insights.append("")
    # Project insights
    if project_analysis is not None and not project_analysis.empty:
        top_project = project_analysis.iloc[0]
        insights.append(f"📊 **Top Project:** {top_project['Project']}")
        insights.append(f"   - Total Hours: {round(top_project['Total_GenAI_Hours'], 2)}")
        insights.append(f"   - Users Involved: {top_project['User_Count']}")
        if 'Champion_Score' in top_project:
            insights.append(f"   - Champion Score: {round(top_project['Champion_Score'], 1)}/100")
        insights.append("")
    # Usage patterns
    if 'GenAI_Efficiency' in result_df.columns:
        active_users = len(result_df[result_df['GenAI_Efficiency'] > 0])
        usage_rate = (active_users / len(result_df)) * 100
        insights.append("📈 **Usage Analysis:**")
        insights.append(f"   - Users with GenAI activity: {active_users}/{len(result_df)} ({round(usage_rate, 1)}%)")
        if active_users > 0:
            high_users = len(result_df[result_df['GenAI_Efficiency'] >= 10])
            insights.append(f"   - High-usage users (≥10 hours): {high_users}")
        insights.append("")
    # Tool and use case insights
    if ai_tool_counts and use_case_counts:
        insights.append("🛠️ **Technology Adoption:**")
        top_3_tools = dict(sorted(ai_tool_counts.items(), key=lambda x: x[1], reverse=True)[:3])
        for tool, count in top_3_tools.items():
            insights.append(f"   - {tool}: {count} mentions")
        insights.append("")
        insights.append("💡 **Primary Use Cases:**")
        top_3_cases = dict(sorted(use_case_counts.items(), key=lambda x: x[1], reverse=True)[:3])
        for case, count in top_3_cases.items():
            insights.append(f"   - {case}: {count} instances")
    return "\n".join(insights)
# Create Gradio interface
def create_gradio_app():
    with gr.Blocks(title="GenAI Worklog Processor", theme=gr.themes.Soft()) as app:
        gr.Markdown("""
        # 🤖 GenAI Worklog Data Processor v1.1
        This application processes worklog data to extract insights about GenAI usage:
        - ✅ Creates a list of unique users
        - ✅ Concatenates GenAI use case descriptions for each user
        - ✅ Captures GenAI efficiency values and metrics
        - ✅ Identifies projects with highest GenAI usage
        - ✅ Analyzes AI tools and use cases
        - ✅ Identifies prompt champions based on quality metrics

        **Required columns:** User, GenAI use case description, GenAI Efficiency (Log time in hours)

        **Optional columns:** Required, Logged, Date, Project, Project Category, Epic, Key
        """)
        with gr.Row():
            with gr.Column(scale=1):
                file_input = gr.File(
                    label="📁 Upload CSV or Excel File",
                    file_types=[".csv", ".xlsx", ".xls"],
                    type="filepath"
                )
                process_btn = gr.Button("🚀 Process Data", variant="primary", size="lg")
            with gr.Column(scale=1):
                status_output = gr.Textbox(
                    label="📋 Processing Status",
                    interactive=False,
                    lines=3
                )
        with gr.Tabs():
            with gr.TabItem("📄 Processed Data"):
                processed_data = gr.Dataframe(
                    label="Processed Results",
                    interactive=False,
                    wrap=True
                )
                download_file = gr.File(
                    label="💾 Download Excel Report",
                    interactive=False
                )
            with gr.TabItem("📈 Summary & Insights"):
                with gr.Row():
                    with gr.Column():
                        summary_stats = gr.Markdown(label="Summary Statistics")
                    with gr.Column():
                        insights_text = gr.Markdown(label="Key Insights")
            with gr.TabItem("📊 Visualizations"):
                with gr.Row():
                    plot1 = gr.Plot(label="GenAI Efficiency by User")
                    plot2 = gr.Plot(label="Project Analysis")
                with gr.Row():
                    plot3 = gr.Plot(label="AI Tools Usage")
                    plot4 = gr.Plot(label="Use Cases Distribution")
                with gr.Row():
                    plot5 = gr.Plot(label="Quality Score Distribution")
                    plot6 = gr.Plot(label="Utilization Analysis")
            with gr.TabItem("ℹ️ How Champion Scores Work"):
                gr.Markdown("""
                ## 🏆 Champion Score Calculation
                The Champion Score evaluates the quality and comprehensiveness of GenAI usage descriptions on a scale of 0-100:
                ### 🛠️ Tools (20 points)
                - **Basic mention** (10 pts): References one AI tool (GPT, Claude, etc.)
                - **Multiple tools** (15 pts): Uses 2+ different AI tools
                - **Specific versions** (+5 pts): Mentions specific models (GPT-4, Claude-2, etc.)
                ### 💡 Use Case (30 points)
                - **Category identification** (5 pts each): Code generation, content creation, data analysis, etc.
                - **Context specificity** (+5 pts): Clear "for/to" statements showing purpose
                - **Domain expertise** (+5 pts): Technical terms (API, database, algorithm, etc.)
                - **Work integration** (+5 pts): References projects, tasks, tickets, stories
                ### ✍️ Prompt Quality (30 points)
                - **Length bonus**: 200+ chars (5 pts), 500+ chars (10 pts)
                - **Prompt indicators** (10 pts): Quotes, mentions "prompt", "assist", "create", "generate"
                - **Advanced techniques** (2 pts each): Step-by-step, chain of thought, few-shot, examples
                ### 🎯 Outcomes & Iteration (20 points)
                - **Results mentioned** (2 pts each): "result", "output", "generated", "created", "improved"
                - **Iteration indicators** (2 pts each): "refine", "revise", "update", "feedback"
                - **Quantified impact** (+5 pts): Percentages, time saved, metrics
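
                **Worked example** (hypothetical description): *"Used ChatGPT to generate code for the backend API"* scores 10 (one tool) + 15 (use case: code keyword, a clear "for the ..." purpose, domain terms) + 10 (prompt verb "generate") + 0 (no outcome language) = **35**, landing in the Basic band below.
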
                ### 📈 Score Interpretation
                - **🥇 90-100**: Exceptional - Comprehensive usage with advanced techniques
                - **🥈 70-89**: Strong - Good tool usage with clear outcomes
                - **🥉 50-69**: Moderate - Basic usage with some detail
                - **📊 30-49**: Basic - Simple usage descriptions
                - **⚠️ 0-29**: Minimal - Very basic or unclear usage

                Higher scores indicate more sophisticated and effective GenAI adoption!
                """)
        # Event handlers
        def process_and_update(file):
            if file is None:
                return (
                    None, "Please upload a file first", None,
                    "No data to display", "No insights available",
                    None, None, None, None, None, None
                )
            try:
                # Read the file (type="filepath" passes the path as a string)
                if file.endswith('.csv'):
                    df = pd.read_csv(file)
                else:
                    df = pd.read_excel(file)
                # Check required columns
                required_columns = ['User', 'GenAI use case description', 'GenAI Efficiency (Log time in hours)']
                missing_columns = [col for col in required_columns if col not in df.columns]
                if missing_columns:
                    return (
                        None, f"❌ Missing required columns: {', '.join(missing_columns)}", None,
                        "Cannot process data", "Missing required columns",
                        None, None, None, None, None, None
                    )
                # Process the data
                result_df = process_genai_data(df)
                project_analysis = analyze_projects_by_genai_hours(df)
                ai_tool_counts = extract_ai_tools_from_descriptions(df)
                use_case_counts = extract_use_cases_from_descriptions(df)
                # Create Excel download
                excel_data = create_download_excel(result_df)
                timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
                excel_filename = f"genai_processed_data_{timestamp}.xlsx"
                # Save Excel file temporarily
                with open(excel_filename, 'wb') as f:
                    f.write(excel_data)
                # Create visualizations (note: plots are appended conditionally, so
                # slots can shift toward earlier panels when some charts are skipped)
                plots = create_visualizations(result_df, project_analysis, ai_tool_counts, use_case_counts)
                # Create summary statistics and insights (local name summary_text avoids
                # shadowing the summary_stats component defined above)
                summary_text = create_summary_stats(result_df, project_analysis, ai_tool_counts, use_case_counts)
                insights = create_insights_text(result_df, project_analysis, ai_tool_counts, use_case_counts)
                return (
                    result_df,
                    "✅ Processing completed successfully!",
                    excel_filename,
                    summary_text,
                    insights,
                    plots[0] if len(plots) > 0 else None,
                    plots[1] if len(plots) > 1 else None,
                    plots[2] if len(plots) > 2 else None,
                    plots[3] if len(plots) > 3 else None,
                    plots[4] if len(plots) > 4 else None,
                    plots[5] if len(plots) > 5 else None
                )
            except Exception as e:
                error_msg = f"❌ Error processing file: {str(e)}"
                return (
                    None, error_msg, None,
                    "Error occurred", error_msg,
                    None, None, None, None, None, None
                )
        process_btn.click(
            fn=process_and_update,
            inputs=[file_input],
            outputs=[
                processed_data, status_output, download_file,
                summary_stats, insights_text,
                plot1, plot2, plot3, plot4, plot5, plot6
            ]
        )
        # Note: Examples removed since we don't have sample files.
        # Users should upload their own CSV/Excel files.
        gr.Markdown("""
        ---
        **Enhanced GenAI Worklog Processor** • Built with Gradio and Pandas

        💡 **Tips for best results:**
        - Ensure your CSV/Excel file has the required columns
        - GenAI descriptions should be detailed for better Champion Scores
        - Include project information for comprehensive analysis
        """)
    return app
# Helper function to assign team categories (defined for completeness; not wired into the UI)
def assign_team_category(row, max_quality, max_hours):
    """Assign a team category based on usage patterns."""
    quality_score = row['Champion_Score']
    hours = row['GenAI_Efficiency']
    # Normalize both dimensions to a 0-100 scale against the team maxima
    quality_norm = (quality_score / max_quality) * 100 if max_quality > 0 else 0
    hours_norm = (hours / max_hours) * 100 if max_hours > 0 else 0
    if quality_norm >= 70 and hours_norm >= 50:
        return "🌟 Power Users", "High quality usage with significant hours"
    elif quality_norm >= 70:
        return "🎯 Quality Champions", "Excellent usage quality, moderate hours"
    elif hours_norm >= 70:
        return "⚡ High Volume", "Heavy usage, opportunity for quality improvement"
    elif quality_norm >= 40 or hours_norm >= 30:
        return "📈 Growing Users", "Developing GenAI skills and usage"
    elif hours > 0:
        return "🌱 Beginners", "Starting GenAI journey"
    else:
        return "💤 Inactive", "No recorded GenAI usage"
# Launch the app
if __name__ == "__main__":
    app = create_gradio_app()
    app.launch(
        share=True,
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )