import os

import gradio as gr
import numpy as np
from huggingface_hub import from_pretrained_keras
from PIL import Image

# Load the pretrained models from the Hugging Face Hub
idpred = from_pretrained_keras("aegishield/idpred")
fingpred = from_pretrained_keras("aegishield/fingpred")


def show_fingername(fingernum):
    # Class indices 0-4 map to the left hand, 5-9 to the right hand
    if fingernum >= 5:
        fingername = "right "
        fingernum -= 5
    else:
        fingername = "left "

    if fingernum == 0:
        fingername += "little"
    elif fingernum == 1:
        fingername += "ring"
    elif fingernum == 2:
        fingername += "middle"
    elif fingernum == 3:
        fingername += "index"
    else:
        fingername += "thumb"

    return fingername


def predict_image(image_path):
    # Report the name of the uploaded file alongside the predictions
    image_name = os.path.basename(image_path) if image_path else "No file name"

    # Load the uploaded image as a PIL Image
    img = Image.open(image_path)

    # Resize to 96x96 and convert to grayscale
    img_resized = img.resize((96, 96)).convert('L')

    # Convert the resized grayscale image to a numpy array
    img_array = np.array(img_resized)

    # Add a channel dimension since the models expect (96, 96, 1)
    img_array = np.expand_dims(img_array, axis=-1)

    # Add a batch dimension
    img_array = np.expand_dims(img_array, axis=0)

    # Run both models
    y_SubjectID_pred = idpred.predict(img_array)
    y_fingerNum_pred = fingpred.predict(img_array)

    # Extract predictions and confidences (subject IDs are 1-based)
    subject_id = np.argmax(y_SubjectID_pred, axis=1)[0] + 1
    finger_num = np.argmax(y_fingerNum_pred, axis=1)[0]
    subject_confidence = np.max(y_SubjectID_pred) * 100
    finger_confidence = np.max(y_fingerNum_pred) * 100

    return (f'Filename: {image_name}, Predicted Subject ID: {subject_id} (Confidence: {subject_confidence:.2f}%)',
            f'Predicted Finger Number: {show_fingername(finger_num)} (Confidence: {finger_confidence:.2f}%)')


# Create the Gradio interface; the image is passed as a file path so the filename can be reported
iface = gr.Interface(fn=predict_image, inputs=gr.Image(type="filepath"), outputs=["text", "text"])

# Launch the interface
iface.launch()