Rename app (2).py to app.py
app (2).py → app.py · RENAMED · +28 -21
@@ -1,40 +1,51 @@
 from fastapi import FastAPI
 from pydantic import BaseModel
-import uvicorn
-from diffusers import StableDiffusionPipeline
 import torch
+from diffusers import StableDiffusionPipeline
+import story_generator  # Import Story Generator
 
 app = FastAPI()
 
-# ✅
-
-
-# ✅ Check for GPU, else use CPU (Important for Render!)
+# ✅ Load Image Model (Arcane-Diffusion for Cartoon-Style Images)
+print("Loading Cartoon Model...")
+image_model_id = "nitrosocke/Arcane-Diffusion"
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# ✅ Load the AI Model ONCE (so it doesn't reload every request)
-print("Loading Cartoon Model...")
 pipe = StableDiffusionPipeline.from_pretrained(
-
+    image_model_id, torch_dtype=torch.float16 if device == "cuda" else torch.float32
 )
 pipe.to(device)
-print("Model Loaded!")
+print("Image Model Loaded!")
+
+# ✅ Define API Request Formats
+class StoryRequest(BaseModel):
+    theme: str
+    reading_level: str
 
 class ImageRequest(BaseModel):
     story: str
 
+# ✅ Story Generation Endpoint
+@app.post("/generate_story")
+def generate_story_endpoint(request: StoryRequest):
+    """Generate a story based on theme and reading level."""
+    result = story_generator.generate_story_and_questions(request.theme, request.reading_level)
+    return {
+        "theme": request.theme,
+        "reading_level": request.reading_level,
+        "story": result["story"],
+        "questions": result["questions"]
+    }
+
+# ✅ Image Generation Endpoint
 @app.post("/generate_image")
 def generate_image(request: ImageRequest):
-    """Generate a
-
-    cartoon_prompt = (
-        f"A colorful, cartoon-style illustration of: {request.story}, "
-        "storybook fantasy world, highly detailed, vibrant colors, soft edges, clear lighting"
-    )
+    """Generate a cartoon-style AI image based on the story text."""
+    cartoon_prompt = f"A colorful, cartoon-style illustration of: {request.story}, vibrant colors, highly detailed, storybook fantasy."
 
     print("Generating Image for:", cartoon_prompt[:100])
 
-    # ✅ Generate Image
+    # ✅ Generate Image
     image = pipe(cartoon_prompt, width=1024, height=1024).images[0]
 
     # ✅ Save Image
@@ -42,8 +53,4 @@ def generate_image(request: ImageRequest):
     image.save(image_path)
 
     print("Image Saved!")
-
     return {"message": "Image generated successfully!", "image_path": image_path}
-
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
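The new /generate_story endpoint depends on a story_generator module that is imported but not included in this commit. Only the function name and the "story"/"questions" keys of its return value can be read off the call site; the sketch below is a hypothetical stand-in for that interface, not the Space's actual implementation.

# story_generator.py -- hypothetical sketch; only the function name and the
# "story"/"questions" return keys are taken from the call in app.py.
def generate_story_and_questions(theme: str, reading_level: str) -> dict:
    """Return a short story and comprehension questions for a theme and reading level."""
    # A real implementation would call a text-generation model here.
    story = f"A {reading_level}-level story about {theme}."
    questions = ["What was the story about?", "How did the story end?"]
    return {"story": story, "questions": questions}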
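The commit also drops the uvicorn import and the __main__ block, so app.py no longer starts a server on its own; presumably the Space's runtime launches it. For running the API locally, a separate runner equivalent to the removed entry point (host and port copied from the old code) would look like:

# run_local.py -- local development only; mirrors the entry point this commit removed.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app:app", host="0.0.0.0", port=8000)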
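With the server running, the two endpoints chain together: request a story first, then feed that story to the image endpoint. A minimal client sketch using the requests library (the base URL and example inputs are placeholders, not values from the Space):

import requests

BASE_URL = "http://localhost:8000"  # placeholder; use the deployed Space's URL instead

# 1. Ask for a story with a theme and reading level.
story_resp = requests.post(
    f"{BASE_URL}/generate_story",
    json={"theme": "a friendly dragon", "reading_level": "beginner"},
).json()

# 2. Turn the returned story into a cartoon-style illustration.
image_resp = requests.post(
    f"{BASE_URL}/generate_image",
    json={"story": story_resp["story"]},
).json()

print(image_resp["message"], "->", image_resp["image_path"])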