#!/usr/bin/env python3
"""
Simple deployment script for Skin Disease AI API
"""
import os
import sys
import subprocess

def create_demo_model():
    """Create a demo model for testing"""
    print("Creating demo model...")
    try:
        from transformers import AutoTokenizer, AutoModelForCausalLM

        model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
        print(f"Downloading {model_name}...")
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)

        # The base checkpoint has no pad token; reuse the EOS token so
        # padding works downstream.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        # Save under the directory name that main() checks for.
        os.makedirs("tinyllama-finetuned-skin", exist_ok=True)
        tokenizer.save_pretrained("tinyllama-finetuned-skin")
        model.save_pretrained("tinyllama-finetuned-skin")
        print("Demo model created successfully!")
        return True
    except Exception as e:
        print(f"Error creating model: {e}")
        return False

def start_server():
    """Start the API server"""
    print("Starting API server on http://localhost:8000")
    print("Press Ctrl+C to stop")
    try:
        # Launch uvicorn against the ASGI app "API:app"; --reload restarts
        # the server when source files change.
        subprocess.run([
            sys.executable, "-m", "uvicorn", "API:app",
            "--host", "0.0.0.0", "--port", "8000", "--reload"
        ])
    except KeyboardInterrupt:
        print("\nServer stopped.")

def main():
    print("Skin Disease AI Deployment")
    print("=" * 40)

    # Check if the model exists; create a demo model if it does not.
    if not os.path.exists("tinyllama-finetuned-skin"):
        print("Model not found. Creating demo model...")
        if not create_demo_model():
            print("Failed to create model. Exiting.")
            return
    else:
        print("Model found!")

    # Start server
    start_server()


if __name__ == "__main__":
    main()
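
# Usage sketch (illustrative only): once the server is running, it can be
# queried over HTTP. The endpoint path and payload below are assumptions for
# illustration; the actual routes are defined in API.py, which is not shown here.
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/predict",  # hypothetical endpoint
#       json={"symptoms": "itchy red rash on the forearm"},
#   )
#   print(resp.json())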