import os
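# '2' hides TensorFlow's INFO and WARNING logs; it must be set before TensorFlow is imported.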
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import logging
import io
import pickle
from pathlib import Path
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from pydantic import BaseModel
from PIL import Image
import torch
import torch.nn.functional as F
from transformers import BertTokenizer, BertForSequenceClassification
import tensorflow as tf
import numpy as np
from huggingface_hub import hf_hub_download
# --- Configuration ---
logging.basicConfig(level=logging.INFO)
STATIC_DIR = Path("static")
# --- Device Configuration ---
device = torch.device('cpu')
try:
    tf.config.set_visible_devices([], 'GPU')
    logging.info("TensorFlow GPU disabled. Using CPU.")
except (RuntimeError, ValueError) as e:
    logging.warning(f"Could not disable GPU for TensorFlow: {e}")
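# Presumably this Space runs on CPU-only hardware: TensorFlow is told to ignore any
# GPU it might probe for, and PyTorch is pinned to the CPU device above.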
# --- Model Loading ---
def load_models():
    logging.info("Loading all models from the Hub...")
    try:
        tokenizer = BertTokenizer.from_pretrained("muhalwan/sental")
        sentiment_model = BertForSequenceClassification.from_pretrained("muhalwan/sental")
        sentiment_model.to(device)
        logging.info("Sentiment analysis model loaded successfully.")
    except Exception as e:
        tokenizer, sentiment_model = None, None
        logging.error(f"Error loading sentiment model: {e}")

    try:
        model_path = hf_hub_download(repo_id="muhalwan/catndog", filename="catdog_best.keras")
        cat_dog_model = tf.keras.models.load_model(model_path, compile=False)
        logging.info("Cat & Dog classifier model loaded successfully.")
    except Exception as e:
        cat_dog_model = None
        logging.error(f"Error loading cat & dog model: {e}")

    try:
        xgb_model_path = hf_hub_download(repo_id="muhalwan/california_housing_price_predictor", filename="xgb_model.pkl")
        with open(xgb_model_path, "rb") as f:
            housing_model = pickle.load(f)
        scaler_path = hf_hub_download(repo_id="muhalwan/california_housing_price_predictor", filename="scaler.pkl")
        with open(scaler_path, "rb") as f:
            housing_scaler = pickle.load(f)
        logging.info("Housing price model and scaler loaded successfully.")
    except Exception as e:
        housing_model, housing_scaler = None, None
        logging.error(f"Error loading housing price model: {e}")

    return tokenizer, sentiment_model, cat_dog_model, housing_model, housing_scaler
# --- FastAPI App Initialization ---
app = FastAPI()
tokenizer, sentiment_model, cat_dog_model, housing_model, housing_scaler = load_models()
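# Loading happens once at import time, so no request ever waits on a model download;
# on a Space that cost is paid during container startup instead.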
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
class SentimentRequest(BaseModel):
    text: str

class HousingRequest(BaseModel):
    MedInc: float
    HouseAge: float
    AveRooms: float
    AveBedrms: float
    Population: float
    AveOccup: float
    Latitude: float
    Longitude: float
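# The eight fields match the column order of scikit-learn's fetch_california_housing();
# presumably the scaler was fit on that layout, so this order must not change.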
# --- API Endpoints ---
@app.get("/")
async def read_root():
return FileResponse('index.html')
@app.post("/predict/sentiment")
async def predict_sentiment(request: SentimentRequest):
if not tokenizer or not sentiment_model:
raise HTTPException(status_code=503, detail="Sentiment model is not available.")
try:
inputs = tokenizer(request.text, return_tensors='pt', truncation=True, max_length=512)
inputs = {k: v.to(device) for k, v in inputs.items()}
with torch.no_grad():
outputs = sentiment_model(**inputs)
probabilities = F.softmax(outputs.logits, dim=-1).squeeze()
labels = ['Bearish', 'Bullish']
prediction = labels[torch.argmax(probabilities).item()]
return {"prediction": prediction}
except Exception as e:
logging.error(f"Sentiment prediction error: {e}")
raise HTTPException(status_code=500, detail="An error occurred during sentiment analysis.")
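# A hypothetical smoke test (assumes the app is served on Spaces' default port 7860):
#   curl -X POST http://localhost:7860/predict/sentiment \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Strong earnings beat expectations."}'
# The response is either {"prediction": "Bearish"} or {"prediction": "Bullish"}.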
@app.post("/predict/catdog")
async def predict_catdog(file: UploadFile = File(...)):
if not cat_dog_model:
raise HTTPException(status_code=503, detail="Cat & Dog model is not available.")
try:
contents = await file.read()
image = Image.open(io.BytesIO(contents))
_, height, width, _ = cat_dog_model.input_shape
img_resized = image.resize((width, height))
if img_resized.mode == 'RGBA':
img_resized = img_resized.convert('RGB')
img_array = tf.keras.utils.img_to_array(img_resized)
img_array = tf.keras.applications.efficientnet.preprocess_input(img_array)
img_array = np.expand_dims(img_array, axis=0)
prob = cat_dog_model.predict(img_array, verbose=0)[0, 0]
label = "Dog" if prob >= 0.5 else "Cat"
return {"prediction": label}
except Exception as e:
logging.error(f"Cat/Dog prediction error: {e}")
raise HTTPException(status_code=500, detail="An error occurred during image classification.")
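# Example upload (the multipart form field must be named "file"; the port is assumed):
#   curl -X POST http://localhost:7860/predict/catdog -F "file=@photo.jpg"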
@app.post("/predict/housing")
async def predict_housing(request: HousingRequest):
if not housing_model or not housing_scaler:
raise HTTPException(status_code=503, detail="Housing model is not available.")
try:
input_data = np.array([[
request.MedInc, request.HouseAge, request.AveRooms, request.AveBedrms,
request.Population, request.AveOccup, request.Latitude, request.Longitude
]])
data_scaled = housing_scaler.transform(input_data)
raw_prediction = housing_model.predict(data_scaled)[0]
final_prediction = raw_prediction * 100000
return {"prediction": f"${final_prediction:,.2f}"}
except Exception as e:
logging.error(f"Housing prediction error: {e}")
raise HTTPException(status_code=500, detail="An error occurred during housing price prediction.")
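# Example request (illustrative values; the port is assumed):
#   curl -X POST http://localhost:7860/predict/housing \
#        -H "Content-Type: application/json" \
#        -d '{"MedInc": 8.32, "HouseAge": 41, "AveRooms": 6.98, "AveBedrms": 1.02,
#             "Population": 322, "AveOccup": 2.56, "Latitude": 37.88, "Longitude": -122.23}'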