#!/usr/bin/env python3
"""
Simple deployment script for Skin Disease AI API
"""
import os
import sys
import subprocess


def create_demo_model():
    """Create a demo model for testing"""
    print("Creating demo model...")
    try:
        from transformers import AutoTokenizer, AutoModelForCausalLM

        model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
        print(f"Downloading {model_name}...")
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)

        # TinyLlama ships without a pad token; reuse the EOS token so padding works.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        os.makedirs("tinyllama-finetuned-skin", exist_ok=True)
        tokenizer.save_pretrained("tinyllama-finetuned-skin")
        model.save_pretrained("tinyllama-finetuned-skin")

        print("āœ… Demo model created successfully!")
        return True
    except Exception as e:
        print(f"āŒ Error creating model: {e}")
        return False


def start_server():
    """Start the API server"""
    print("šŸš€ Starting API server on http://localhost:8000")
    print("Press Ctrl+C to stop")
    try:
        # Serve the ASGI app "app" from the API module; --reload restarts on code changes.
        subprocess.run([
            sys.executable, "-m", "uvicorn", "API:app",
            "--host", "0.0.0.0",
            "--port", "8000",
            "--reload",
        ])
    except KeyboardInterrupt:
        print("\nšŸ›‘ Server stopped.")


def main():
    print("šŸš€ Skin Disease AI Deployment")
    print("=" * 40)

    # Check if model exists
    if not os.path.exists("tinyllama-finetuned-skin"):
        print("Model not found. Creating demo model...")
        if not create_demo_model():
            print("Failed to create model. Exiting.")
            return
    else:
        print("āœ… Model found!")

    # Start server
    start_server()


if __name__ == "__main__":
    main()
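
# Optional reachability check (a hedged sketch, not used by the script above): from a
# second terminal you can confirm the server is accepting connections on port 8000.
# It only probes the TCP socket, so it assumes nothing about the routes API:app exposes.
#
#   import socket
#   with socket.create_connection(("localhost", 8000), timeout=5):
#       print("āœ… Server is reachable on port 8000")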