Upload 26 files

- app.py +414 -0
- core/__pycache__/background_animations.cpython-311.pyc +0 -0
- core/__pycache__/character_animations.cpython-311.pyc +0 -0
- core/__pycache__/fx_animations.cpython-311.pyc +0 -0
- core/__pycache__/infographic_animations.cpython-311.pyc +0 -0
- core/__pycache__/logo_animations.cpython-311.pyc +0 -0
- core/__pycache__/overlay_animations.cpython-311.pyc +0 -0
- core/__pycache__/pipeline.cpython-311.pyc +0 -0
- core/__pycache__/shape_animations.cpython-311.pyc +0 -0
- core/__pycache__/text_animations.cpython-311.pyc +0 -0
- core/__pycache__/transitions.cpython-311.pyc +0 -0
- core/__pycache__/vectorize1.cpython-311.pyc +0 -0
- core/__pycache__/vectorizer.cpython-311.pyc +0 -0
- core/background_animations.py +25 -0
- core/character_animations.py +27 -0
- core/fx_animations.py +45 -0
- core/infographic_animations.py +25 -0
- core/logo_animations.py +442 -0
- core/overlay_animations.py +25 -0
- core/pipeline.py +1070 -0
- core/shape_animations.py +54 -0
- core/text_animations.py +410 -0
- core/transitions.py +26 -0
- core/vectorizer.py +868 -0
- requirements.txt +0 -0
- task_queue.py +399 -0
app.py
ADDED
@@ -0,0 +1,414 @@
# # app.py
# import os
# import uuid
# import shutil
# import tempfile
# import asyncio
# from pathlib import Path
# from fastapi import FastAPI, File, UploadFile, Form, HTTPException
# from fastapi.responses import FileResponse, JSONResponse
# from fastapi.middleware.cors import CORSMiddleware

# # local imports
# from task_queue import TaskQueue, TaskStatus
# from core.pipeline import process_image_pipeline  # your real pipeline

# # App config
# APP_NAME = "manim_render_service"
# TMP_ROOT = Path(tempfile.gettempdir()) / APP_NAME
# TASKS_DIR = TMP_ROOT / "tasks"
# OUTPUTS_DIR = TMP_ROOT / "outputs"
# TASKS_DIR.mkdir(parents=True, exist_ok=True)
# OUTPUTS_DIR.mkdir(parents=True, exist_ok=True)

# # instantiate queue (file-backed)
# queue = TaskQueue(base_dir=TMP_ROOT, max_workers=os.cpu_count() or 2)

# app = FastAPI(title="Manim Render Service")
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["*"],  # change in prod
#     allow_credentials=True,
#     allow_methods=["*"],
#     allow_headers=["*"],
# )


# @app.on_event("startup")
# async def startup_event():
#     # start background workers (non-blocking)
#     await queue.start(processor=process_image_pipeline)


# @app.on_event("shutdown")
# async def shutdown_event():
#     await queue.stop()


# def _make_task_dir(task_id: str) -> Path:
#     p = TASKS_DIR / task_id
#     p.mkdir(parents=True, exist_ok=True)
#     return p


# def _secure_filename(filename: str) -> str:
#     # Minimal safe filename normalizer
#     return "".join(c for c in filename if c.isalnum() or c in "._-").strip("_")


# @app.post("/render", status_code=202)
# async def submit_render(
#     image: UploadFile = File(...),
#     style: str = Form("fade-in"),
#     quality: str = Form("final"),  # preview or final
# ):
#     # Basic validation
#     if image.content_type.split("/")[0] != "image":
#         raise HTTPException(status_code=400, detail="Uploaded file must be an image.")

#     task_id = uuid.uuid4().hex
#     task_dir = _make_task_dir(task_id)

#     # Save upload to tmp task directory
#     safe_name = _secure_filename(image.filename or f"{task_id}.png")
#     uploaded_path = task_dir / safe_name
#     try:
#         with uploaded_path.open("wb") as f:
#             content = await image.read()
#             # Limit size for safety (example: 25 MB)
#             if len(content) > 25 * 1024 * 1024:
#                 raise HTTPException(status_code=413, detail="File too large (max 25MB).")
#             f.write(content)
#     finally:
#         await image.close()

#     # Compose metadata
#     meta = {
#         "task_id": task_id,
#         "input_image": str(uploaded_path),
#         "style": style,
#         "quality": quality,
#         "task_dir": str(task_dir),
#     }

#     # Enqueue the task
#     queue.enqueue(meta)

#     return JSONResponse({"task_id": task_id, "status": "queued"})


# @app.get("/status/{task_id}")
# async def status(task_id: str):
#     st = queue.get_status(task_id)
#     if st is None:
#         raise HTTPException(status_code=404, detail="Task not found")
#     return JSONResponse({"task_id": task_id, "status": st.name})


# @app.get("/result/{task_id}")
# async def result(task_id: str):
#     info = queue.get_task_info(task_id)
#     if info is None:
#         raise HTTPException(status_code=404, detail="Task not found")

#     status = queue.get_status(task_id)
#     if status != TaskStatus.COMPLETED:
#         return JSONResponse({"task_id": task_id, "status": status.name})

#     output_path = Path(info.get("output_path", ""))
#     if not output_path.exists():
#         raise HTTPException(status_code=404, detail="Output not found on disk")

#     return FileResponse(path=str(output_path), filename=output_path.name, media_type="video/mp4")


# @app.delete("/task/{task_id}")
# async def delete_task(task_id: str):
#     info = queue.get_task_info(task_id)
#     if info:
#         # attempt cleanup
#         task_dir = Path(info.get("task_dir", ""))
#         if task_dir.exists():
#             shutil.rmtree(task_dir, ignore_errors=True)
#         queue.remove_task(task_id)
#         return JSONResponse({"task_id": task_id, "status": "removed"})
#     else:
#         raise HTTPException(status_code=404, detail="Task not found")


# app.py
import os
import uuid
import shutil
import tempfile
import asyncio
import subprocess
import base64
import logging
from pathlib import Path

from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.responses import FileResponse, JSONResponse, Response
from fastapi.middleware.cors import CORSMiddleware
from moviepy.video.io.VideoFileClip import VideoFileClip

# local imports
from task_queue import TaskQueue, TaskStatus
from core.pipeline import process_image_pipeline  # your real pipeline

# --------------------------------------------------
# Logging Setup
# --------------------------------------------------
logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s] [%(levelname)s] %(message)s",
    datefmt="%H:%M:%S",
)
logger = logging.getLogger("manim_render_service")

# --------------------------------------------------
# App config
# --------------------------------------------------
APP_NAME = "manim_render_service"
TMP_ROOT = Path("tmp") / APP_NAME
TASKS_DIR = TMP_ROOT / "tasks"
OUTPUTS_DIR = TMP_ROOT / "outputs"
TASKS_DIR.mkdir(parents=True, exist_ok=True)
OUTPUTS_DIR.mkdir(parents=True, exist_ok=True)

queue = TaskQueue(base_dir=TMP_ROOT, max_workers=os.cpu_count() or 2)

app = FastAPI(title="Manim Render Service")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# --------------------------------------------------
# Lifecycle Events
# --------------------------------------------------
@app.on_event("startup")
async def startup_event():
    logger.info("Starting up backend...")
    logger.debug(f"Temporary root: {TMP_ROOT}")
    await queue.start(processor=process_image_pipeline)
    logger.info("Queue system initialized and worker started.")


@app.on_event("shutdown")
async def shutdown_event():
    logger.info("Shutting down backend...")
    await queue.stop()
    logger.info("Queue stopped gracefully.")


# --------------------------------------------------
# Helpers
# --------------------------------------------------
def _make_task_dir(task_id: str) -> Path:
    p = TASKS_DIR / task_id
    p.mkdir(parents=True, exist_ok=True)
    logger.debug(f"Created task directory: {p}")
    return p


def _secure_filename(filename: str) -> str:
    safe = "".join(c for c in filename if c.isalnum() or c in "._-").strip("_")
    logger.debug(f"Secured filename: {filename} -> {safe}")
    return safe


# --------------------------------------------------
# Routes
# --------------------------------------------------
@app.post("/render", status_code=202)
async def submit_render(
    image: UploadFile = File(...),
    style: str = Form("fade-in"),
    quality: str = Form("final"),
):
    logger.info(f"Received new render request | style={style}, quality={quality}")
    logger.debug(f"Uploaded file info: {image.filename}, type={image.content_type}")

    if image.content_type.split("/")[0] != "image":
        logger.error("Invalid file type, not an image.")
        raise HTTPException(status_code=400, detail="Uploaded file must be an image.")

    task_id = uuid.uuid4().hex
    task_dir = _make_task_dir(task_id)
    logger.info(f"Generated Task ID: {task_id}")

    safe_name = _secure_filename(image.filename or f"{task_id}.png")
    uploaded_path = task_dir / safe_name
    logger.debug(f"Saving upload to {uploaded_path}")

    try:
        with uploaded_path.open("wb") as f:
            content = await image.read()
            logger.debug(f"File size: {len(content)/1024:.2f} KB")
            if len(content) > 25 * 1024 * 1024:
                logger.warning("Upload too large (>25MB). Rejecting.")
                raise HTTPException(status_code=413, detail="File too large (max 25MB).")
            f.write(content)
    finally:
        await image.close()
        logger.debug("Image file closed after writing.")

    meta = {
        "task_id": task_id,
        "input_image": str(uploaded_path),
        "style": style,
        "quality": quality,
        "task_dir": str(task_dir),
    }

    logger.debug(f"Task metadata: {meta}")
    queue.enqueue(meta)
    logger.info(f"Task {task_id} successfully enqueued.")

    return JSONResponse({"task_id": task_id, "status": "queued"})


@app.get("/status/{task_id}")
async def status(task_id: str):
    logger.debug(f"Status check for task: {task_id}")
    st = queue.get_status(task_id)
    if st is None:
        logger.warning(f"Task {task_id} not found")
        raise HTTPException(status_code=404, detail="Task not found")

    # Get additional info
    task_info = queue.get_task_info(task_id)
    logger.info(f"Task {task_id} status: {st.name} | info: {task_info}")

    return JSONResponse({
        "task_id": task_id,
        "status": st.name,
        "details": task_info
    })


# async def result(task_id: str):
#     logger.debug(f"Fetching result for task: {task_id}")
#     info = queue.get_task_info(task_id)
#     if info is None:
#         logger.warning(f"Task info not found for {task_id}")
#         raise HTTPException(status_code=404, detail="Task not found")

#     status = queue.get_status(task_id)
#     logger.debug(f"Task {task_id} current status: {status.name}")

#     if status != TaskStatus.COMPLETED:
#         logger.info(f"Task {task_id} still in progress ({status.name})")
#         return JSONResponse({"task_id": task_id, "status": status.name})

#     output_path = Path(info.get("output_path", ""))
#     logger.debug(f"Checking output path: {output_path}")
#     # if not output_path.exists():
#     #     logger.error(f"Output file missing for task {task_id}")
#     #     raise HTTPException(status_code=404, detail="Output not found on disk")

#     info = queue.get_task_info(task_id)
#     output_bytes = info.get("output_bytes")

#     if output_bytes:
#         logger.info(f"Returning in-memory video for task {task_id}")
#         return Response(content=output_bytes, media_type="video")

#     # fallback to disk if memory missing
#     if not output_path.exists():
#         logger.error(f"Output file missing for task {task_id}")
#         raise HTTPException(status_code=404, detail="Output not found on disk")

#     logger.info(f"Returning result video from disk for task {task_id}")
#     return FileResponse(
#         path=str(output_path), filename=output_path.name, media_type="video"
#     )
@app.get("/result/{task_id}")
async def result(task_id: str):
    logger.debug(f"Fetching result for task: {task_id}")
    info = queue.get_task_info(task_id)
    if info is None:
        logger.warning(f"Task info not found for {task_id}")
        raise HTTPException(status_code=404, detail="Task not found")

    status = queue.get_status(task_id)
    logger.debug(f"Task {task_id} current status: {status.name}")

    if status != TaskStatus.COMPLETED:
        logger.info(f"Task {task_id} still in progress ({status.name})")
        return JSONResponse({"task_id": task_id, "status": status.name})

    info = queue.get_task_info(task_id)
    output_path = Path(info.get("output_path", ""))  # MOV path

    if not output_path.exists():
        logger.error(f"Output file missing for task {task_id}")
        raise HTTPException(status_code=404, detail="Output not found")

    # Convert MOV to WEBM with alpha if needed
    webm_path = output_path.with_suffix(".webm")
    if output_path.suffix.lower() == ".mov" and not webm_path.exists():
        try:
            logger.info("Converting .mov -> .webm (keeping transparency)...")
            cmd = [
                "ffmpeg",
                "-y",
                "-i", str(output_path),
                "-c:v", "libvpx-vp9",
                "-pix_fmt", "yuva420p",  # keep alpha channel
                "-b:v", "4M",
                "-auto-alt-ref", "0",
                str(webm_path)
            ]
            subprocess.run(cmd, check=True, capture_output=True)
            logger.info(f"Converted successfully -> {webm_path}")
        except Exception as e:
            logger.error(f"MOV->WEBM conversion failed: {e}")
            raise HTTPException(status_code=500, detail=f"Conversion failed: {e}")

    # Read both MOV and WEBM as bytes
    mov_bytes = output_path.read_bytes()
    webm_bytes = webm_path.read_bytes()

    logger.info(f"Returning both MOV + WEBM for task {task_id}")
    return JSONResponse({
        "task_id": task_id,
        "status": "COMPLETED",
        "results": [
            {
                "format": "mov",
                "data": base64.b64encode(mov_bytes).decode("utf-8"),
            },
            {
                "format": "webm",
                "data": base64.b64encode(webm_bytes).decode("utf-8"),
            },
        ],
    })


@app.delete("/task/{task_id}")
async def delete_task(task_id: str):
    logger.info(f"Request to delete task: {task_id}")
    info = queue.get_task_info(task_id)
    if info:
        task_dir = Path(info.get("task_dir", ""))
        if task_dir.exists():
            logger.debug(f"Removing directory: {task_dir}")
            shutil.rmtree(task_dir, ignore_errors=True)
        queue.remove_task(task_id)
        logger.info(f"Task {task_id} removed successfully.")
        return JSONResponse({"task_id": task_id, "status": "removed"})
    else:
        logger.warning(f"Task {task_id} not found for deletion.")
        raise HTTPException(status_code=404, detail="Task not found")
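Note: for reference, a minimal client-side sketch of how this API is driven end to end (submit to /render, poll /status, then decode the base64 payloads that /result returns). Endpoint paths, form fields, and the COMPLETED status name come from app.py above; the host/port, file names, and the FAILED status are placeholders/assumptions.

# Hypothetical client for the service above (host/port, file names, and FAILED status are assumptions).
import base64
import time
import requests

BASE = "http://localhost:7860"  # placeholder host/port

# 1) Submit an image for rendering
with open("logo.png", "rb") as fh:
    resp = requests.post(
        f"{BASE}/render",
        files={"image": ("logo.png", fh, "image/png")},
        data={"style": "logo-particle", "quality": "final"},
    )
task_id = resp.json()["task_id"]

# 2) Poll /status until the queue reports a terminal state
while True:
    status = requests.get(f"{BASE}/status/{task_id}").json()["status"]
    if status in ("COMPLETED", "FAILED"):  # FAILED is assumed, not shown in this commit
        break
    time.sleep(2)

# 3) /result returns MOV and WEBM as base64 strings; decode and save both
payload = requests.get(f"{BASE}/result/{task_id}").json()
for item in payload.get("results", []):
    with open(f"{task_id}.{item['format']}", "wb") as out:
        out.write(base64.b64decode(item["data"]))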
core/__pycache__/background_animations.cpython-311.pyc
ADDED
Binary file (1.21 kB)

core/__pycache__/character_animations.cpython-311.pyc
ADDED
Binary file (1.3 kB)

core/__pycache__/fx_animations.cpython-311.pyc
ADDED
Binary file (5.68 kB)

core/__pycache__/infographic_animations.cpython-311.pyc
ADDED
Binary file (1.14 kB)

core/__pycache__/logo_animations.cpython-311.pyc
ADDED
Binary file (5.71 kB)

core/__pycache__/overlay_animations.cpython-311.pyc
ADDED
Binary file (1.16 kB)

core/__pycache__/pipeline.cpython-311.pyc
ADDED
Binary file (9.08 kB)

core/__pycache__/shape_animations.cpython-311.pyc
ADDED
Binary file (2.3 kB)

core/__pycache__/text_animations.cpython-311.pyc
ADDED
Binary file (9.63 kB)

core/__pycache__/transitions.cpython-311.pyc
ADDED
Binary file (1.2 kB)

core/__pycache__/vectorize1.cpython-311.pyc
ADDED
Binary file (6.21 kB)

core/__pycache__/vectorizer.cpython-311.pyc
ADDED
Binary file (5.35 kB)
core/background_animations.py
ADDED
@@ -0,0 +1,25 @@
# app/core/animations/background_animations.py
from pathlib import Path
from typing import Dict

def animated_gradient(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "AnimatedGradient"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: animate gradient backgrounds (possibly pre-render or use many rectangles)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def particle_motion(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "ParticleMotion"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: implement particle motions like snow or bubbles
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}
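Note: each generator in these animation modules returns a dict of scene_name and scene_code rather than rendering anything itself. A minimal sketch of how such a dict could be turned into a video with the Manim CLI is below; the real rendering path is the pipeline's _run_manim helper (only partially visible in core/pipeline.py later), so the file layout, CLI flags, and output glob here are assumptions.

# Hypothetical driver: write a generated scene to disk and render it with the manim CLI.
import subprocess
from pathlib import Path

def render_scene(scene: dict, work_dir: Path) -> Path:
    scene_file = work_dir / "scene.py"
    scene_file.write_text(scene["scene_code"])
    # -ql = low-quality preview; a real pipeline would likely pick the flag from `quality`
    subprocess.run(
        ["manim", "-ql", str(scene_file), scene["scene_name"]],
        cwd=work_dir, check=True,
    )
    # Manim writes output under media/videos/<module>/<resolution>/<SceneName>.mp4
    return next(work_dir.glob(f"media/videos/**/{scene['scene_name']}.mp4"))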
core/character_animations.py
ADDED
@@ -0,0 +1,27 @@
# app/core/animations/character_animations.py
from pathlib import Path
from typing import Dict

def walk_cycle(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "WalkCycle"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: Implement a basic walk cycle using several frames or interpolated parts
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def bobbing(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "Bobbing"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        self.play(svg.animate.shift(UP*0.2), run_time=0.4)
        self.play(svg.animate.shift(DOWN*0.2), run_time=0.4)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}
core/fx_animations.py
ADDED
@@ -0,0 +1,45 @@
# app/core/animations/fx_animations.py
from pathlib import Path
from typing import Dict

def particle_burst(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "ParticleBurst"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        self.add(svg)
        # TODO: Implement particle burst using many small dots animated outward
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}


def smoke_effect(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "SmokeFX"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        # TODO: Add smoke overlay using alpha and per-frame masks or pre-rendered assets
        self.play(FadeIn(svg), run_time=0.8)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def light_rays(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "LightRays"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        # TODO: Create rays using triangles/gradients and animate opacity
        self.play(FadeIn(svg), run_time=0.6)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}
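Note: particle_burst is still a TODO. One way to fill it, following the same dots-sampled-from-SVG-points pattern the committed logo_fade_particle uses later in core/logo_animations.py; this is a sketch of a possible implementation, not the committed one, and the SVG file name is a placeholder.

# Possible ParticleBurst scene (illustrative, not part of this commit)
from manim import *
import numpy as np
import random

class ParticleBurstSketch(Scene):
    def construct(self):
        svg = SVGMobject("logo.svg")  # placeholder file name
        self.add(svg)
        # one dot per sampled outline point
        dots = VGroup(*[
            Dot(point=p, radius=0.03, color=WHITE)
            for p in svg.get_all_points()[::4]
        ])
        self.add(dots)
        # fling each dot outward in a random direction while fading it out
        anims = []
        for d in dots:
            angle = random.uniform(0, 2 * np.pi)
            offset = 2.0 * np.array([np.cos(angle), np.sin(angle), 0.0])
            anims.append(d.animate.shift(offset).set_opacity(0))
        self.play(svg.animate.set_opacity(0), LaggedStart(*anims, lag_ratio=0.005), run_time=1.5)
        self.wait(0.3)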
core/infographic_animations.py
ADDED
@@ -0,0 +1,25 @@
# app/core/animations/infographic_animations.py
from pathlib import Path
from typing import Dict

def bar_chart_grow(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "BarChartGrow"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: build bars and animate heights 0->value
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def number_count(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "NumberCount"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: animate number counting
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}
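Note: both stubs in this file are TODOs. For the counting animation, Manim's ValueTracker plus DecimalNumber is the usual approach; a sketch of what the generated scene body could look like follows (target value and styling are placeholders, not the committed implementation).

# Possible NumberCount scene (illustrative, not part of this commit)
from manim import *

class NumberCountSketch(Scene):
    def construct(self):
        tracker = ValueTracker(0)
        number = DecimalNumber(0, num_decimal_places=0).scale(2)
        # keep the displayed number in sync with the tracker on every frame
        number.add_updater(lambda m: m.set_value(tracker.get_value()))
        self.add(number)
        self.play(tracker.animate.set_value(100), run_time=2, rate_func=smooth)
        self.wait(0.5)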
core/logo_animations.py
ADDED
@@ -0,0 +1,442 @@
# app/core/animations/logo_animations.py
from pathlib import Path
from typing import Dict
import random

# def logo_build_lines(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "LogoBuildLines"
#     scene_code = f'''
# from manim import *
# class {scene_name}(Scene):
#     def construct(self):
#         # TODO: Implement logo build from lines/paths
#         svg = SVGMobject("{svg_path.name}")
#         self.play(Write(svg), run_time=1)
#         self.wait(0.5)
# '''
#     return {"scene_name": scene_name, "scene_code": scene_code}
def logo_build_lines(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "LogoBuildLines"
    scene_code = f'''
from manim import *
import random

class {scene_name}(Scene):
    def construct(self):
        # Load SVG logo
        svg = SVGMobject("{svg_path.name}")
        svg.set(width=6)
        svg.set_stroke(width=2)
        svg.set_fill(opacity=1)
        svg.move_to(ORIGIN)

        # Split into subpaths for individual animation
        subpaths = svg.family_members_with_points()

        # Start each subpath invisible & slightly smaller
        for sub in subpaths:
            sub.set_opacity(0.05)
            sub.scale(0.98)

        # Animate each path individually with cinematic timing
        for sub in subpaths:
            delay = random.uniform(0, 0.05)  # tiny stagger for natural feel
            self.wait(delay)
            self.play(
                Create(sub),
                sub.animate.set_opacity(1).scale(1.0),
                run_time=0.35,
                rate_func=smooth
            )

        # Optional: final gentle pop to settle logo
        self.play(svg.animate.scale(1.02), run_time=0.15, rate_func=there_and_back)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

# def logo_fade_particle(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "LogoFadeParticle"
#     scene_code = f'''
# from manim import *
# class {scene_name}(Scene):
#     def construct(self):
#         svg = SVGMobject("{svg_path.name}")
#         self.play(FadeIn(svg), run_time=0.8)
#         # TODO: Add particles using small dots or flocking
#         self.wait(0.5)
# '''
#     return {"scene_name": scene_name, "scene_code": scene_code}
# def logo_fade_particle(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "LogoFadeParticle"
#     scene_code = f'''
# from manim import *
# import random
#
# class {scene_name}(Scene):
#     def construct(self):
#         # Load SVG logo
#         svg = SVGMobject("{svg_path.name}")
#         svg.set(width=6)
#         svg.set_stroke(width=2)
#         svg.set_fill(opacity=1)
#         svg.move_to(ORIGIN)
#         svg.set_opacity(0)  # start invisible
#
#         # Smooth fade-in with subtle scale pop
#         self.play(svg.animate.set_opacity(1).scale(1.05), run_time=0.8, rate_func=smooth)
#         self.play(svg.animate.scale(0.98), run_time=0.15, rate_func=there_and_back)
#
#         # Particle effect from the logo edges
#         points = svg.get_all_points()
#         particles = VGroup(*[Dot(point, radius=0.04, color=WHITE, fill_opacity=0.6) for point in points])
#
#         # Animate particles floating outward
#         for particle in particles:
#             offset = [random.uniform(-1,1), random.uniform(-1,1), 0]
#             self.play(particle.animate.shift(offset).set_opacity(0), run_time=random.uniform(0.5,1.0), rate_func=smooth, lag_ratio=0.1)
#
#         self.wait(0.5)
# '''
#     return {"scene_name": scene_name, "scene_code": scene_code}
# def logo_fade_particle(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "LogoParticleBreak"
#
#     scene_code = f"""
# from manim import *
# import random
#
# class {scene_name}(Scene):
#     def construct(self):
#         # Load logo
#         svg = SVGMobject(r"{svg_path.name}")
#         svg.set(width=6)
#         svg.set_stroke(width=2)
#         svg.set_fill(opacity=1)
#         svg.move_to(ORIGIN)
#         svg.set_opacity(0)
#
#         # Fade-in + pop
#         self.play(
#             svg.animate.set_opacity(1).scale(1.05),
#             run_time=0.8,
#             rate_func=smooth
#         )
#         self.play(
#             svg.animate.scale(0.97),
#             run_time=0.25,
#             rate_func=there_and_back
#         )
#
#         # Sample SVG points
#         raw_points = svg.get_all_points()
#         points = raw_points[::2]  # skip every 2nd for density + speed
#
#         # Create particles on edges
#         particles = VGroup(
#             *[
#                 Dot(point, radius=0.035, color=WHITE, fill_opacity=0.9)
#                 for point in points
#             ]
#         )
#         particles.set_opacity(0)
#         self.add(particles)
#
#         # Dissolve logo into particles
#         self.play(
#             svg.animate.set_opacity(0),
#             particles.animate.set_opacity(1),
#             run_time=0.6,
#             rate_func=smooth
#         )
#
#         # Outward particle spread
#         anims = []
#         for p in particles:
#             offset = [
#                 random.uniform(-1.2, 1.2),
#                 random.uniform(-1.2, 1.2),
#                 0
#             ]
#             anims.append(p.animate.shift(offset).set_opacity(0))
#
#         self.play(
#             LaggedStart(*anims, lag_ratio=0.008),
#             run_time=1.3,
#             rate_func=smooth
#         )
#
#         self.wait(0.3)
#     """
#
#     return {"scene_name": scene_name, "scene_code": scene_code}


# def logo_fade_particle(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "LogoParticleBreak"
#
#     scene_code = f"""
# from manim import *
# import random
#
# class {scene_name}(Scene):
#     def construct(self):
#         # Load SVG as grouped paths
#         svg = SVGMobject(r"{svg_path.name}")
#         svg.set(width=6)
#         svg.set_stroke(width=2)
#         svg.set_fill(opacity=1)
#         svg.move_to(ORIGIN)
#         svg.set_opacity(0)
#
#         # Fade-in + pop
#         self.play(svg.animate.set_opacity(1).scale(1.05), run_time=0.8, rate_func=smooth)
#         self.play(svg.animate.scale(0.97), run_time=0.25, rate_func=there_and_back)
#
#         # Collect points & colors from subobjects
#         points = []
#         colors = []
#
#         for sub in svg.family_members_with_points():
#             pts = sub.get_points()
#             col = sub.get_fill_color()
#             for p in pts[::3]:  # skip for smoother density
#                 points.append(p)
#                 colors.append(col)
#
#         # Create cinematic glowing particles
#         particles = VGroup()
#         for p, col in zip(points, colors):
#             part = Circle(radius=0.03, color=col, fill_color=col, fill_opacity=1)
#             part.move_to(p)
#             part.set_opacity(0)
#             part.scale(random.uniform(0.6, 1.2))  # natural variation
#             particles.add(part)
#
#         self.add(particles)
#
#         # Logo dissolves -> particles appear
#         self.play(
#             svg.animate.set_opacity(0),
#             particles.animate.set_opacity(1),
#             run_time=0.5,
#             rate_func=smooth,
#         )
#
#         # Particle outward scatter
#         anims = []
#         for p in particles:
#             offset = [
#                 random.uniform(-1.3, 1.3),
#                 random.uniform(-1.3, 1.3),
#                 0
#             ]
#             anims.append(
#                 p.animate.shift(offset)
#                 .scale(random.uniform(0.6, 1.8))
#                 .set_opacity(0)
#             )
#
#         self.play(
#             LaggedStart(*anims, lag_ratio=0.006),
#             run_time=1.4,
#             rate_func=smooth
#         )
#
#         self.wait(0.3)
#     """
#
#     return {"scene_name": scene_name, "scene_code": scene_code}


# def logo_fade_particle(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "LogoParticlesFullPoints"
#
#     scene_code = f"""
# from manim import *
# import numpy as np
# import random
#
# class {scene_name}(Scene):
#     def construct(self):
#         # Load SVG
#         svg = SVGMobject(r"{svg_path.name}")
#         svg.set(width=6)
#         svg.set_stroke(width=2)
#         svg.set_fill(opacity=1)
#         svg.move_to(ORIGIN)
#         svg.scale(0.8)  # start smaller for pop effect
#         svg.set_opacity(0)
#
#         # Smooth fade-in with tiny pop
#         self.play(
#             svg.animate.set_opacity(1).scale(1.05),
#             run_time=0.8,
#             rate_func=smooth
#         )
#         self.play(svg.animate.scale(0.97), run_time=0.2, rate_func=smooth)
#
#         # Use all points from the entire SVG
#         points, colors = [], []
#         for sub in svg.family_members_with_points():
#             pts = sub.get_all_points()  # all points of the subpath
#             if len(pts) == 0:
#                 continue
#             col = sub.get_fill_color()
#             for p in pts:
#                 points.append(p)
#                 colors.append(col)
#
#         # Create particles at all points
#         particles = VGroup()
#         for p, col in zip(points, colors):
#             part = Dot(point=p, radius=0.03, color=col)
#             part.set_opacity(1)  # start visible
#             particles.add(part)
#
#         self.add(particles)
#
#         # Transform SVG into particles (SVG fades as particles scatter)
#         scatter_anims = []
#         for part in particles:
#             offset = np.array([
#                 random.uniform(-1.5, 1.5),
#                 random.uniform(-1.5, 1.5),
#                 0
#             ])
#             scatter_anims.append(
#                 part.animate.shift(offset)
#                 .scale(random.uniform(0.5, 1.5))
#                 .set_opacity(0)
#             )
#
#         self.play(
#             svg.animate.set_opacity(0),
#             LaggedStart(*scatter_anims, lag_ratio=0.003),
#             run_time=1.5,
#             rate_func=smooth
#         )
#
#         self.wait(0.5)
#     """
#
#     return {"scene_name": scene_name, "scene_code": scene_code}


def logo_fade_particle(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "LogoParticlesBlast"

    scene_code = f"""
from manim import *
import numpy as np
import random

class {scene_name}(Scene):
    def construct(self):
        # Load SVG
        svg = SVGMobject(r"{svg_path.name}")
        svg.set(width=6)
        svg.set_stroke(width=2)
        svg.set_fill(opacity=1)
        svg.move_to(ORIGIN)
        svg.scale(0.8)  # start smaller for pop effect
        svg.set_opacity(0)

        # Smooth fade-in with tiny pop
        self.play(
            svg.animate.set_opacity(1).scale(1.05),
            run_time=0.8,
            rate_func=smooth
        )
        self.play(svg.animate.scale(0.97), run_time=0.2, rate_func=smooth)

        # Use all points from the SVG
        points, colors = [], []
        for sub in svg.family_members_with_points():
            pts = sub.get_all_points()
            if len(pts) == 0:
                continue
            col = sub.get_fill_color()
            for p in pts:
                points.append(p)
                colors.append(col)

        # Create particles at all points
        particles = VGroup()
        for p, col in zip(points, colors):
            part = Dot(point=p, radius=0.03, color=col)
            part.set_opacity(1)
            particles.add(part)

        self.add(particles)

        # SVG transforms into particles with blast effect
        blast_anims = []
        for part in particles:
            # Random direction vector
            angle = random.uniform(0, 2 * np.pi)
            distance = random.uniform(1.0, 3.0)  # how far it flies
            offset = np.array([distance * np.cos(angle),
                               distance * np.sin(angle),
                               random.uniform(-0.3, 0.3)])
            # Particle animation: move, fade, and scale
            blast_anims.append(
                part.animate.shift(offset)
                .scale(random.uniform(0.5, 1.5))
                .set_opacity(0)
            )

        # Play SVG fade + particle blast together
        self.play(
            svg.animate.set_opacity(0),
            LaggedStart(*blast_anims, lag_ratio=0.002),
            run_time=2.0,  # slow motion blast
            rate_func=smooth
        )

        self.wait(0.5)
"""

    return {"scene_name": scene_name, "scene_code": scene_code}

def logo_spin_scale(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "LogoSpinScale"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        svg.scale(0.5)
        self.play(Rotate(svg, angle=TAU), svg.animate.scale(1.2), run_time=1)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def logo_stroke_draw(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "LogoStrokeDraw"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: Animate stroke drawing by tracing paths
        svg = SVGMobject("{svg_path.name}")
        self.play(Write(svg), run_time=1)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def logo_glitch(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "LogoGlitch"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        self.play(FadeIn(svg), run_time=0.4)
        # TODO: Implement quick offset copies to simulate glitch
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}
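Note: logo_glitch is still a TODO ("quick offset copies to simulate glitch"). A sketch of that idea follows: two colour-shifted copies of the logo jittered for a few frames, then removed. This is illustrative only, not the committed implementation, and the SVG file name is a placeholder.

# Possible LogoGlitch scene (illustrative, not part of this commit)
from manim import *
import random

class LogoGlitchSketch(Scene):
    def construct(self):
        svg = SVGMobject("logo.svg")  # placeholder file name
        self.play(FadeIn(svg), run_time=0.4)
        # two tinted copies, nudged in opposite directions, flickered briefly
        red = svg.copy().set_color(RED).set_opacity(0.5)
        blue = svg.copy().set_color(BLUE).set_opacity(0.5)
        for _ in range(6):
            dx = random.uniform(0.02, 0.08)
            red.move_to(svg.get_center() + RIGHT * dx)
            blue.move_to(svg.get_center() + LEFT * dx)
            self.add(red, blue)
            self.wait(1 / 30)
            self.remove(red, blue)
            self.wait(1 / 30)
        self.wait(0.5)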
core/overlay_animations.py
ADDED
@@ -0,0 +1,25 @@
# app/core/animations/overlay_animations.py
from pathlib import Path
from typing import Dict

def confetti(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "Confetti"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: implement confetti using many small shapes with gravity-like animation
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def checkmark_tick(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "CheckmarkTick"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: show checkmark animation
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}
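Note: the confetti stub describes "many small shapes with gravity-like animation". A sketch of one way to realise that, with small squares dropped below the frame under an eased fall; illustrative only, with the piece count, colours, and rate function chosen arbitrarily.

# Possible Confetti scene (illustrative, not part of this commit)
from manim import *
import random

class ConfettiSketch(Scene):
    def construct(self):
        colors = [RED, YELLOW, BLUE, GREEN, PURPLE]
        pieces = VGroup(*[
            Square(side_length=0.08, fill_opacity=1, fill_color=random.choice(colors), stroke_width=0)
            .move_to([random.uniform(-6, 6), random.uniform(3.5, 5), 0])
            .rotate(random.uniform(0, PI))
            for _ in range(80)
        ])
        self.add(pieces)
        # every piece drops below the frame with its own distance and spin, for a loose "gravity" feel
        self.play(
            LaggedStart(*[
                p.animate.shift(DOWN * random.uniform(7, 9)).rotate(random.uniform(-PI, PI))
                for p in pieces
            ], lag_ratio=0.01),
            run_time=2.5,
            rate_func=rate_functions.ease_in_quad,
        )
        self.wait(0.3)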
core/pipeline.py
ADDED
@@ -0,0 +1,1070 @@
# # app/core/pipeline.py
# import os
# import shutil
# import subprocess
# import sys
# import tempfile
# import time
# from pathlib import Path
# from typing import Dict, Optional, Tuple

# from core.vectorizer import vectorize_image

# # Import animation modules (each file inside core/)
# import core.text_animations as text_animations
# import core.logo_animations as logo_animations
# import core.shape_animations as shape_animations
# import core.fx_animations as fx_animations
# import core.transitions as transitions
# import core.infographic_animations as infographic_animations
# import core.character_animations as character_animations
# import core.background_animations as background_animations
# import core.overlay_animations as overlay_animations


# # Global outputs dir (stable)
# GLOBAL_OUTPUTS_DIR = Path(tempfile.gettempdir()) / "manim_render_service" / "outputs"
# GLOBAL_OUTPUTS_DIR.mkdir(parents=True, exist_ok=True)

# # Manim installation directory under tmp (pip --target here)
# MANIM_TARGET_DIR = Path(tempfile.gettempdir()) / "manim_render_service" / "manim_env"
# MANIM_TARGET_DIR.mkdir(parents=True, exist_ok=True)

# # Map style strings to animation functions across modules.
# STYLE_MAP = {
#     # Text basics
#     "fade-in": text_animations.fade_in,
#     "slide-in-left": text_animations.slide_in_left,
#     "pop-bounce": text_animations.pop_bounce,
#     "zoom-in": text_animations.zoom_in,
#     "typewriter": text_animations.typewriter,
#     "wipe": text_animations.wipe_mask,
#     "flip": text_animations.flip_rotate,
#     "blur-in": text_animations.blur_in,
#     "scale-up": text_animations.scale_up,
#     # Text premium
#     "neon-glow": text_animations.neon_glow,
#     "gradient-fill": text_animations.gradient_fill,
#     "wave-ripple": text_animations.wave_ripple,
#     "split-text": text_animations.split_text,
#     # Logo
#     "logo-build": logo_animations.logo_build_lines,
#     "logo-particle": logo_animations.logo_fade_particle,
#     "logo-spin": logo_animations.logo_spin_scale,
#     "logo-stroke": logo_animations.logo_stroke_draw,
#     "logo-glitch": logo_animations.logo_glitch,
#     # Shapes
#     "line-draw": shape_animations.line_draw,
#     "shape-morph": shape_animations.shape_morph,
#     "grow-center": shape_animations.grow_center,
#     "floating": shape_animations.floating_bounce,
#     # FX
#     "particle-burst": fx_animations.particle_burst,
#     "smoke": fx_animations.smoke_effect,
#     "light-rays": fx_animations.light_rays,
#     # Transitions & infographic & character etc (examples)
#     "fade-transition": transitions.fade_transition,
#     "bar-chart": infographic_animations.bar_chart_grow,
#     "count-number": infographic_animations.number_count,
#     "walk-cycle": character_animations.walk_cycle,
#     "bobbing": character_animations.bobbing,
#     "animated-gradient": background_animations.animated_gradient,
#     "particle-motion": background_animations.particle_motion,
#     "confetti": overlay_animations.confetti,
#     "checkmark": overlay_animations.checkmark_tick,
# }

# # Helper: detect manim in PATH or in current python env
# def _manim_executable_available() -> bool:
#     # 1) check for manim binary in PATH
#     if shutil.which("manim"):
#         return True
#     # 2) check if importable in current python env
#     try:
#         import manim  # noqa: F401
#         return True
#     except Exception:
#         return False

# def _ensure_manim_installed(log_file: Path) -> tuple[bool, str]:
#     """
#     Ensure Manim is available. Strategy:
#     - If system has manim in PATH or importable, OK.
#     - Otherwise, run pip install --target MANIM_TARGET_DIR manim
#       and set PYTHONPATH to include MANIM_TARGET_DIR when invoking manim via `python -m manim`.
#     Returns (success, message). Writes pip logs to log_file.
#     NOTE: pip install requires network and may take time; call this once at startup or first run.
#     """
#     if _manim_executable_available():
#         return True, "manim-available"

#     try:
#         log_file.parent.mkdir(parents=True, exist_ok=True)
#         with log_file.open("ab") as lf:
#             # pip install into target dir
#             cmd = [sys.executable, "-m", "pip", "install", "--upgrade", "--target", str(MANIM_TARGET_DIR), "manim"]
#             proc = subprocess.run(cmd, stdout=lf, stderr=lf, check=False, timeout=1800)
#             if proc.returncode != 0:
#                 return False, f"pip-install-failed:{proc.returncode}"
#     except Exception as e:
#         return False, f"pip-install-exception:{e}"

#     # final check
#     if _manim_executable_available():
#         return True, "manim-installed-system"
#     # Even if not on PATH, we can still run via python -m manim with PYTHONPATH pointing to MANIM_TARGET_DIR
#     return True, "manim-installed-target"

# def _run_manim(scene_py_path: Path, scene_class: str, work_dir: Path, log_file: Path, quality_flag: str = "-ql", timeout: int = 300) -> Optional[Path]:
# def _run_manim(scene_py_path: Path, scene_class: str, work_dir: Path, log_file: Path, quality_flag: str = "-ql", timeout: int = 300) -> Optional[Path]:
|
| 119 |
+
# """
|
| 120 |
+
# Run manim to render the scene class from scene_py_path inside work_dir.
|
| 121 |
+
# - Uses python -m manim with PYTHONPATH including MANIM_TARGET_DIR to pick up target install.
|
| 122 |
+
# - quality_flag example: "-ql" quick low, "-qm" medium, "-qh" high.
|
| 123 |
+
# - Always uses "-t" for transparent background.
|
| 124 |
+
# Returns Path to produced mp4 or None on failure.
|
| 125 |
+
# """
|
| 126 |
+
# # Build base command. prefer system manim if available (shutil.which)
|
| 127 |
+
# if shutil.which("manim"):
|
| 128 |
+
# base = ["manim"]
|
| 129 |
+
# else:
|
| 130 |
+
# base = [sys.executable, "-m", "manim"]
|
| 131 |
+
# # Ensure output name deterministic: use scene_py stem + scene_class
|
| 132 |
+
# out_name = f"{scene_py_path.stem}_{scene_class}"
|
| 133 |
+
# cmd = [*base, quality_flag, "-t", str(scene_py_path), scene_class, "-o", out_name]
|
| 134 |
+
# env = os.environ.copy()
|
| 135 |
+
# # include MANIM_TARGET_DIR in PYTHONPATH so python -m manim can import if installed with --target
|
| 136 |
+
# env_py = env.get("PYTHONPATH", "")
|
| 137 |
+
# new_py = str(MANIM_TARGET_DIR)
|
| 138 |
+
# if env_py:
|
| 139 |
+
# new_py = new_py + os.pathsep + env_py
|
| 140 |
+
# env["PYTHONPATH"] = new_py
|
| 141 |
+
|
| 142 |
+
# try:
|
| 143 |
+
# with log_file.open("ab") as lf:
|
| 144 |
+
# start = time.time()
|
| 145 |
+
# proc = subprocess.run(cmd, cwd=str(work_dir), env=env, stdout=lf, stderr=lf, timeout=timeout)
|
| 146 |
+
# elapsed = time.time() - start
|
| 147 |
+
# if proc.returncode != 0:
|
| 148 |
+
# return None
|
| 149 |
+
# except Exception:
|
| 150 |
+
# return None
|
| 151 |
+
|
| 152 |
+
# # search for produced mp4 under work_dir (manim writes into media/videos/... usually)
|
| 153 |
+
# mp4s = list(work_dir.rglob("*.mp4"))
|
| 154 |
+
# if not mp4s:
|
| 155 |
+
# return None
|
| 156 |
+
# mp4s.sort(key=lambda p: p.stat().st_mtime, reverse=True)
|
| 157 |
+
# return mp4s[0]
|
| 158 |
+
|
| 159 |
+
# def process_image_pipeline(task_meta: Dict) -> Dict:
|
| 160 |
+
# """
|
| 161 |
+
# Synchronous pipeline entry. Steps:
|
| 162 |
+
# - set up task_dir/working/outputs/logs
|
| 163 |
+
# - vectorize input image -> svg
|
| 164 |
+
# - call animation placeholder (get scene_code)
|
| 165 |
+
# - write scene file & copy svg into working dir
|
| 166 |
+
# - ensure manim installed (pip --target if needed)
|
| 167 |
+
# - run manim with -t -> get mp4
|
| 168 |
+
# - copy mp4 to GLOBAL_OUTPUTS_DIR and return path
|
| 169 |
+
# """
|
| 170 |
+
# task_id = task_meta.get("task_id")
|
| 171 |
+
# task_dir = Path(task_meta.get("task_dir", ""))
|
| 172 |
+
# if not task_id or not task_dir.exists():
|
| 173 |
+
# return {"success": False, "error": "invalid-task"}
|
| 174 |
+
|
| 175 |
+
# working = task_dir / "working"
|
| 176 |
+
# outputs = task_dir / "outputs"
|
| 177 |
+
# logs = task_dir / "logs"
|
| 178 |
+
# working.mkdir(parents=True, exist_ok=True)
|
| 179 |
+
# outputs.mkdir(parents=True, exist_ok=True)
|
| 180 |
+
# logs.mkdir(parents=True, exist_ok=True)
|
| 181 |
+
|
| 182 |
+
# log_file = logs / f"{task_id}.log"
|
| 183 |
+
|
| 184 |
+
# try:
|
| 185 |
+
# # 1) copy input to working folder
|
| 186 |
+
# input_image = Path(task_meta.get("input_image"))
|
| 187 |
+
# if not input_image.exists():
|
| 188 |
+
# return {"success": False, "error": "input-missing"}
|
| 189 |
+
|
| 190 |
+
# safe_input = working / input_image.name
|
| 191 |
+
# shutil.copy2(input_image, safe_input)
|
| 192 |
+
|
| 193 |
+
# # 2) vectorize -> svg
|
| 194 |
+
# svg_path = working / f"{task_id}.svg"
|
| 195 |
+
# vec_res = vectorize_image(safe_input, svg_path, options={"quality": task_meta.get("quality")})
|
| 196 |
+
# if not vec_res.get("success"):
|
| 197 |
+
# return {"success": False, "error": f"vectorize-failed:{vec_res.get('error')}"}
|
| 198 |
+
|
| 199 |
+
# # 3) pick animation function
|
| 200 |
+
# style = task_meta.get("style", "fade-in")
|
| 201 |
+
# anim_fn = STYLE_MAP.get(style)
|
| 202 |
+
# if anim_fn is None:
|
| 203 |
+
# # fallback to fade-in
|
| 204 |
+
# anim_fn = text_animations.fade_in
|
| 205 |
+
|
| 206 |
+
# # 4) animation function returns scene info (name + code)
|
| 207 |
+
# scene_info = anim_fn(svg_path, working, task_meta)
|
| 208 |
+
# if not scene_info or "scene_name" not in scene_info or "scene_code" not in scene_info:
|
| 209 |
+
# return {"success": False, "error": "animation-fn-invalid"}
|
| 210 |
+
|
| 211 |
+
# scene_name = scene_info["scene_name"]
|
| 212 |
+
# scene_code = scene_info["scene_code"]
|
| 213 |
+
|
| 214 |
+
# # 5) write scene python file and ensure svg is in same folder
|
| 215 |
+
# scene_py = working / f"scene_{task_id}.py"
|
| 216 |
+
# scene_py.write_text(scene_code, encoding="utf-8")
|
| 217 |
+
# # ensure svg copied into working dir
|
| 218 |
+
# try:
|
| 219 |
+
# shutil.copy2(svg_path, working / svg_path.name)
|
| 220 |
+
# except Exception:
|
| 221 |
+
# pass
|
| 222 |
+
|
| 223 |
+
# # 6) ensure manim is available (installs into MANIM_TARGET_DIR if not)
|
| 224 |
+
# ok, msg = _ensure_manim_installed(log_file)
|
| 225 |
+
# if not ok:
|
| 226 |
+
# return {"success": False, "error": f"manim-install-failed:{msg}"}
|
| 227 |
+
|
| 228 |
+
# # 7) run manim to render scene with transparent background
|
| 229 |
+
# # quality: map task_meta quality -> flag
|
| 230 |
+
# q = task_meta.get("quality", "preview")
|
| 231 |
+
# quality_flag = "-ql" if q == "preview" else "-qh" if q == "final" else "-qm"
|
| 232 |
+
# rendered_mp4 = _run_manim(scene_py, scene_name, working, log_file, quality_flag=quality_flag, timeout=900)
|
| 233 |
+
# if not rendered_mp4:
|
| 234 |
+
# # attach log path for debugging
|
| 235 |
+
# return {"success": False, "error": "manim-render-failed", "log": str(log_file)}
|
| 236 |
+
|
| 237 |
+
# # 8) copy to GLOBAL_OUTPUTS_DIR
|
| 238 |
+
# dest = GLOBAL_OUTPUTS_DIR / f"{task_id}_{rendered_mp4.name}"
|
| 239 |
+
# try:
|
| 240 |
+
# if dest.exists():
|
| 241 |
+
# dest.unlink()
|
| 242 |
+
# shutil.copy2(rendered_mp4, dest)
|
| 243 |
+
# except Exception as e:
|
| 244 |
+
# return {"success": False, "error": f"copy-failed:{e}", "log": str(log_file)}
|
| 245 |
+
|
| 246 |
+
# # optional: cleanup working to save disk
|
| 247 |
+
# try:
|
| 248 |
+
# shutil.rmtree(working)
|
| 249 |
+
# except Exception:
|
| 250 |
+
# pass
|
| 251 |
+
|
| 252 |
+
# return {"success": True, "output_path": str(dest), "log": str(log_file)}
|
| 253 |
+
|
| 254 |
+
# except Exception as e:
|
| 255 |
+
# # keep task_dir for debug
|
| 256 |
+
# return {"success": False, "error": str(e), "log": str(log_file)}
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
# # app/core/pipeline.py
|
| 262 |
+
# import os
|
| 263 |
+
# import shutil
|
| 264 |
+
# import subprocess
|
| 265 |
+
# import sys
|
| 266 |
+
# import tempfile
|
| 267 |
+
# import time
|
| 268 |
+
# from pathlib import Path
|
| 269 |
+
# from typing import Dict, Optional, Tuple
|
| 270 |
+
|
| 271 |
+
# from core.vectorizer import vectorize_image
|
| 272 |
+
|
| 273 |
+
# # Import animation modules (each file inside core/)
|
| 274 |
+
# import core.text_animations as text_animations
|
| 275 |
+
# import core.logo_animations as logo_animations
|
| 276 |
+
# import core.shape_animations as shape_animations
|
| 277 |
+
# import core.fx_animations as fx_animations
|
| 278 |
+
# import core.transitions as transitions
|
| 279 |
+
# import core.infographic_animations as infographic_animations
|
| 280 |
+
# import core.character_animations as character_animations
|
| 281 |
+
# import core.background_animations as background_animations
|
| 282 |
+
# import core.overlay_animations as overlay_animations
|
| 283 |
+
|
| 284 |
+
# # Global outputs dir (stable)
|
| 285 |
+
# GLOBAL_OUTPUTS_DIR = Path(tempfile.gettempdir()) / "manim_render_service" / "outputs"
|
| 286 |
+
# GLOBAL_OUTPUTS_DIR.mkdir(parents=True, exist_ok=True)
|
| 287 |
+
|
| 288 |
+
# # Map style strings to animation functions across modules.
|
| 289 |
+
# STYLE_MAP = {
|
| 290 |
+
# # Text basics
|
| 291 |
+
# "fade-in": text_animations.fade_in,
|
| 292 |
+
# "slide-in-left": text_animations.slide_in_left,
|
| 293 |
+
# "pop-bounce": text_animations.pop_bounce,
|
| 294 |
+
# "zoom-in": text_animations.zoom_in,
|
| 295 |
+
# "typewriter": text_animations.typewriter,
|
| 296 |
+
# "wipe": text_animations.wipe_mask,
|
| 297 |
+
# "flip": text_animations.flip_rotate,
|
| 298 |
+
# "blur-in": text_animations.blur_in,
|
| 299 |
+
# "scale-up": text_animations.scale_up,
|
| 300 |
+
# # Text premium
|
| 301 |
+
# "neon-glow": text_animations.neon_glow,
|
| 302 |
+
# "gradient-fill": text_animations.gradient_fill,
|
| 303 |
+
# "wave-ripple": text_animations.wave_ripple,
|
| 304 |
+
# "split-text": text_animations.split_text,
|
| 305 |
+
# # Logo
|
| 306 |
+
# "logo-build": logo_animations.logo_build_lines,
|
| 307 |
+
# "logo-particle": logo_animations.logo_fade_particle,
|
| 308 |
+
# "logo-spin": logo_animations.logo_spin_scale,
|
| 309 |
+
# "logo-stroke": logo_animations.logo_stroke_draw,
|
| 310 |
+
# "logo-glitch": logo_animations.logo_glitch,
|
| 311 |
+
# # Shapes
|
| 312 |
+
# "line-draw": shape_animations.line_draw,
|
| 313 |
+
# "shape-morph": shape_animations.shape_morph,
|
| 314 |
+
# "grow-center": shape_animations.grow_center,
|
| 315 |
+
# "floating": shape_animations.floating_bounce,
|
| 316 |
+
# # FX
|
| 317 |
+
# "particle-burst": fx_animations.particle_burst,
|
| 318 |
+
# "smoke": fx_animations.smoke_effect,
|
| 319 |
+
# "light-rays": fx_animations.light_rays,
|
| 320 |
+
# # Transitions & infographic & character etc (examples)
|
| 321 |
+
# "fade-transition": transitions.fade_transition,
|
| 322 |
+
# "bar-chart": infographic_animations.bar_chart_grow,
|
| 323 |
+
# "count-number": infographic_animations.number_count,
|
| 324 |
+
# "walk-cycle": character_animations.walk_cycle,
|
| 325 |
+
# "bobbing": character_animations.bobbing,
|
| 326 |
+
# "animated-gradient": background_animations.animated_gradient,
|
| 327 |
+
# "particle-motion": background_animations.particle_motion,
|
| 328 |
+
# "confetti": overlay_animations.confetti,
|
| 329 |
+
# "checkmark": overlay_animations.checkmark_tick,
|
| 330 |
+
# }
|
| 331 |
+
|
| 332 |
+
# def _run_manim(scene_py_path: Path, scene_class: str, work_dir: Path, log_file: Path, quality_flag: str = "-ql", timeout: int = 300) -> Optional[Path]:
|
| 333 |
+
# """
|
| 334 |
+
# Run manim to render the scene class from scene_py_path inside work_dir.
|
| 335 |
+
# - Uses python -m manim command.
|
| 336 |
+
# - Always uses "-t" for transparent background.
|
| 337 |
+
# Returns Path to produced mp4 or None on failure.
|
| 338 |
+
# """
|
| 339 |
+
# base = [sys.executable, "-m", "manim"]
|
| 340 |
+
# out_name = f"{scene_py_path.stem}_{scene_class}"
|
| 341 |
+
# cmd = [*base, quality_flag, "-t", str(scene_py_path), scene_class, "-o", out_name]
|
| 342 |
+
|
| 343 |
+
# try:
|
| 344 |
+
# with log_file.open("ab") as lf:
|
| 345 |
+
# start = time.time()
|
| 346 |
+
# proc = subprocess.run(cmd, cwd=str(work_dir), stdout=lf, stderr=lf, timeout=timeout)
|
| 347 |
+
# elapsed = time.time() - start
|
| 348 |
+
# if proc.returncode != 0:
|
| 349 |
+
# return None
|
| 350 |
+
# except Exception:
|
| 351 |
+
# return None
|
| 352 |
+
|
| 353 |
+
# mp4s = list(work_dir.rglob("*.mp4"))
|
| 354 |
+
# if not mp4s:
|
| 355 |
+
# return None
|
| 356 |
+
# mp4s.sort(key=lambda p: p.stat().st_mtime, reverse=True)
|
| 357 |
+
# return mp4s[0]
|
| 358 |
+
|
| 359 |
+
# def process_image_pipeline(task_meta: Dict) -> Dict:
|
| 360 |
+
# """
|
| 361 |
+
# Synchronous pipeline entry. Steps:
|
| 362 |
+
# - set up task_dir/working/outputs/logs
|
| 363 |
+
# - vectorize input image -> svg
|
| 364 |
+
# - call animation placeholder (get scene_code)
|
| 365 |
+
# - write scene file & copy svg into working dir
|
| 366 |
+
# - run manim with -t -> get mp4
|
| 367 |
+
# - copy mp4 to GLOBAL_OUTPUTS_DIR and return path
|
| 368 |
+
# """
|
| 369 |
+
# task_id = task_meta.get("task_id")
|
| 370 |
+
# task_dir = Path(task_meta.get("task_dir", ""))
|
| 371 |
+
# if not task_id or not task_dir.exists():
|
| 372 |
+
# return {"success": False, "error": "invalid-task"}
|
| 373 |
+
|
| 374 |
+
# working = task_dir / "working"
|
| 375 |
+
# outputs = task_dir / "outputs"
|
| 376 |
+
# logs = task_dir / "logs"
|
| 377 |
+
# working.mkdir(parents=True, exist_ok=True)
|
| 378 |
+
# outputs.mkdir(parents=True, exist_ok=True)
|
| 379 |
+
# logs.mkdir(parents=True, exist_ok=True)
|
| 380 |
+
|
| 381 |
+
# log_file = logs / f"{task_id}.log"
|
| 382 |
+
|
| 383 |
+
# try:
|
| 384 |
+
# input_image = Path(task_meta.get("input_image"))
|
| 385 |
+
# if not input_image.exists():
|
| 386 |
+
# return {"success": False, "error": "input-missing"}
|
| 387 |
+
|
| 388 |
+
# safe_input = working / input_image.name
|
| 389 |
+
# shutil.copy2(input_image, safe_input)
|
| 390 |
+
|
| 391 |
+
# # Step 1: Prepare vectorization
|
| 392 |
+
# svg_path = working / f"{task_id}.svg"
|
| 393 |
+
|
| 394 |
+
# # Run vectorization β returns {"success": True, "svg": "<svg>...</svg>"}
|
| 395 |
+
# vec_res = vectorize_image(safe_input, options={"quality": task_meta.get("quality")})
|
| 396 |
+
# if not vec_res.get("success"):
|
| 397 |
+
# return {"success": False, "error": f"vectorize-failed:{vec_res.get('error')}"}
|
| 398 |
+
|
| 399 |
+
# # Step 2: Write SVG to file (since later parts of pipeline expect a file path)
|
| 400 |
+
# svg_content = vec_res.get("svg")
|
| 401 |
+
# if not svg_content:
|
| 402 |
+
# return {"success": False, "error": "vectorizer-did-not-return-svg"}
|
| 403 |
+
|
| 404 |
+
# svg_path.write_text(svg_content, encoding="utf-8")
|
| 405 |
+
|
| 406 |
+
# style = task_meta.get("style", "fade-in")
|
| 407 |
+
# anim_fn = STYLE_MAP.get(style, text_animations.fade_in)
|
| 408 |
+
|
| 409 |
+
# scene_info = anim_fn(svg_path, working, task_meta)
|
| 410 |
+
# if not scene_info or "scene_name" not in scene_info or "scene_code" not in scene_info:
|
| 411 |
+
# return {"success": False, "error": "animation-fn-invalid"}
|
| 412 |
+
|
| 413 |
+
# scene_name = scene_info["scene_name"]
|
| 414 |
+
# scene_code = scene_info["scene_code"]
|
| 415 |
+
|
| 416 |
+
# scene_py = working / f"scene_{task_id}.py"
|
| 417 |
+
# scene_py.write_text(scene_code, encoding="utf-8")
|
| 418 |
+
|
| 419 |
+
# try:
|
| 420 |
+
# shutil.copy2(svg_path, working / svg_path.name)
|
| 421 |
+
# except Exception:
|
| 422 |
+
# pass
|
| 423 |
+
|
| 424 |
+
# q = task_meta.get("quality", "preview")
|
| 425 |
+
# quality_flag = "-ql" if q == "preview" else "-qh" if q == "final" else "-qm"
|
| 426 |
+
# rendered_mp4 = _run_manim(scene_py, scene_name, working, log_file, quality_flag=quality_flag, timeout=900)
|
| 427 |
+
# if not rendered_mp4:
|
| 428 |
+
# return {"success": False, "error": "manim-render-failed", "log": str(log_file)}
|
| 429 |
+
|
| 430 |
+
# dest = GLOBAL_OUTPUTS_DIR / f"{task_id}_{rendered_mp4.name}"
|
| 431 |
+
# try:
|
| 432 |
+
# if dest.exists():
|
| 433 |
+
# dest.unlink()
|
| 434 |
+
# shutil.copy2(rendered_mp4, dest)
|
| 435 |
+
# except Exception as e:
|
| 436 |
+
# return {"success": False, "error": f"copy-failed:{e}", "log": str(log_file)}
|
| 437 |
+
|
| 438 |
+
# try:
|
| 439 |
+
# shutil.rmtree(working)
|
| 440 |
+
# except Exception:
|
| 441 |
+
# pass
|
| 442 |
+
|
| 443 |
+
# return {"success": True, "output_path": str(dest), "log": str(log_file)}
|
| 444 |
+
|
| 445 |
+
# except Exception as e:
|
| 446 |
+
# return {"success": False, "error": str(e), "log": str(log_file)}
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
# # app/core/pipeline.py
|
| 451 |
+
# import os
|
| 452 |
+
# import shutil
|
| 453 |
+
# import subprocess
|
| 454 |
+
# import sys
|
| 455 |
+
# import tempfile
|
| 456 |
+
# import time
|
| 457 |
+
# from pathlib import Path
|
| 458 |
+
# from typing import Dict, Optional, Tuple
|
| 459 |
+
|
| 460 |
+
# from core.vectorizer import vectorize_image
|
| 461 |
+
|
| 462 |
+
# # Import animation modules (each file inside core/)
|
| 463 |
+
# import core.text_animations as text_animations
|
| 464 |
+
# import core.logo_animations as logo_animations
|
| 465 |
+
# import core.shape_animations as shape_animations
|
| 466 |
+
# import core.fx_animations as fx_animations
|
| 467 |
+
# import core.transitions as transitions
|
| 468 |
+
# import core.infographic_animations as infographic_animations
|
| 469 |
+
# import core.character_animations as character_animations
|
| 470 |
+
# import core.background_animations as background_animations
|
| 471 |
+
# import core.overlay_animations as overlay_animations
|
| 472 |
+
|
| 473 |
+
# # Global outputs dir (stable)
|
| 474 |
+
# GLOBAL_OUTPUTS_DIR = Path("tmp")/ "manim_render_service" / "outputs"
|
| 475 |
+
# GLOBAL_OUTPUTS_DIR.mkdir(parents=True, exist_ok=True)
|
| 476 |
+
|
| 477 |
+
# # Map style strings to animation functions across modules.
|
| 478 |
+
# STYLE_MAP = {
|
| 479 |
+
# # Text basics
|
| 480 |
+
# "fade-in": text_animations.fade_in,
|
| 481 |
+
# "slide-in-left": text_animations.slide_in_left,
|
| 482 |
+
# "pop-bounce": text_animations.pop_bounce,
|
| 483 |
+
# "zoom-in": text_animations.zoom_in,
|
| 484 |
+
# "typewriter": text_animations.typewriter,
|
| 485 |
+
# "wipe": text_animations.wipe_mask,
|
| 486 |
+
# "flip": text_animations.flip_rotate,
|
| 487 |
+
# "blur-in": text_animations.blur_in,
|
| 488 |
+
# "scale-up": text_animations.scale_up,
|
| 489 |
+
# # Text premium
|
| 490 |
+
# "neon-glow": text_animations.neon_glow,
|
| 491 |
+
# "gradient-fill": text_animations.gradient_fill,
|
| 492 |
+
# "wave-ripple": text_animations.wave_ripple,
|
| 493 |
+
# "split-text": text_animations.split_text,
|
| 494 |
+
# # Logo
|
| 495 |
+
# "logo-build": logo_animations.logo_build_lines,
|
| 496 |
+
# "logo-particle": logo_animations.logo_fade_particle,
|
| 497 |
+
# "logo-spin": logo_animations.logo_spin_scale,
|
| 498 |
+
# "logo-stroke": logo_animations.logo_stroke_draw,
|
| 499 |
+
# "logo-glitch": logo_animations.logo_glitch,
|
| 500 |
+
# # Shapes
|
| 501 |
+
# "line-draw": shape_animations.line_draw,
|
| 502 |
+
# "shape-morph": shape_animations.shape_morph,
|
| 503 |
+
# "grow-center": shape_animations.grow_center,
|
| 504 |
+
# "floating": shape_animations.floating_bounce,
|
| 505 |
+
# # FX
|
| 506 |
+
# "particle-burst": fx_animations.particle_burst,
|
| 507 |
+
# "smoke": fx_animations.smoke_effect,
|
| 508 |
+
# "light-rays": fx_animations.light_rays,
|
| 509 |
+
# # Transitions & infographic & character etc (examples)
|
| 510 |
+
# "fade-transition": transitions.fade_transition,
|
| 511 |
+
# "bar-chart": infographic_animations.bar_chart_grow,
|
| 512 |
+
# "count-number": infographic_animations.number_count,
|
| 513 |
+
# "walk-cycle": character_animations.walk_cycle,
|
| 514 |
+
# "bobbing": character_animations.bobbing,
|
| 515 |
+
# "animated-gradient": background_animations.animated_gradient,
|
| 516 |
+
# "particle-motion": background_animations.particle_motion,
|
| 517 |
+
# "confetti": overlay_animations.confetti,
|
| 518 |
+
# "checkmark": overlay_animations.checkmark_tick,
|
| 519 |
+
# }
|
| 520 |
+
|
| 521 |
+
# # def _run_manim(scene_py_path: Path, scene_class: str, work_dir: Path, log_file: Path, quality_flag: str = "-ql", timeout: int = 300) -> Optional[Path]:
|
| 522 |
+
# # print(f"[DEBUG] Running manim render for scene '{scene_class}' in '{work_dir}'...")
|
| 523 |
+
# # base = [sys.executable, "-m", "manim"]
|
| 524 |
+
# # out_name = f"{scene_py_path.stem}_{scene_class}"
|
| 525 |
+
# # cmd = [*base, quality_flag, "-t", str(scene_py_path), scene_class, "-o", out_name]
|
| 526 |
+
# # print(f"[DEBUG] Command: {' '.join(cmd)}")
|
| 527 |
+
|
| 528 |
+
# # try:
|
| 529 |
+
# # with log_file.open("ab") as lf:
|
| 530 |
+
# # start = time.time()
|
| 531 |
+
# # proc = subprocess.run(cmd, cwd=str(work_dir), stdout=lf, stderr=lf, timeout=timeout)
|
| 532 |
+
# # elapsed = time.time() - start
|
| 533 |
+
# # print(f"[DEBUG] Manim completed in {elapsed:.2f}s with return code {proc.returncode}")
|
| 534 |
+
# # if proc.returncode != 0:
|
| 535 |
+
# # print("[ERROR] Manim render failed.")
|
| 536 |
+
# # return None
|
| 537 |
+
# # except Exception as e:
|
| 538 |
+
# # print(f"[ERROR] Exception during manim run: {e}")
|
| 539 |
+
# # return None
|
| 540 |
+
|
| 541 |
+
# # mp4s = list(work_dir.rglob("*.mp4"))
|
| 542 |
+
# # if not mp4s:
|
| 543 |
+
# # print("[ERROR] No MP4 files found after rendering.")
|
| 544 |
+
# # return None
|
| 545 |
+
# # mp4s.sort(key=lambda p: p.stat().st_mtime, reverse=True)
|
| 546 |
+
# # print(f"[DEBUG] Rendered MP4: {mp4s[0]}")
|
| 547 |
+
# # return mp4s[0]
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
# def _run_manim(
|
| 551 |
+
# scene_py_path: Path,
|
| 552 |
+
# scene_class: str,
|
| 553 |
+
# work_dir: Path,
|
| 554 |
+
# log_file: Path,
|
| 555 |
+
# quality_flag: str = "-ql",
|
| 556 |
+
# timeout: int = 300
|
| 557 |
+
# ) -> Optional[Path]:
|
| 558 |
+
# """
|
| 559 |
+
# Run a Manim render inside a temp working directory and return the final video path (.mp4 or .mov).
|
| 560 |
+
# Logs full debug details and ensures the output is stored inside `work_dir`.
|
| 561 |
+
# """
|
| 562 |
+
# import sys, subprocess, shutil, time
|
| 563 |
+
|
| 564 |
+
# print(f"\n[DEBUG] π¬ Starting Manim render for scene '{scene_class}'...")
|
| 565 |
+
# print(f"[DEBUG] Working directory: {work_dir}")
|
| 566 |
+
# print(f"[DEBUG] Scene file: {scene_py_path}")
|
| 567 |
+
|
| 568 |
+
# base = [sys.executable, "-m", "manim"]
|
| 569 |
+
# out_name = f"{scene_py_path.stem}_{scene_class}"
|
| 570 |
+
|
| 571 |
+
# # β
Expected output path (any format)
|
| 572 |
+
# expected_output = work_dir / f"{out_name}"
|
| 573 |
+
# print(f"[DEBUG] Expected output file prefix: {expected_output}")
|
| 574 |
+
|
| 575 |
+
# # β
Manim command (saves media inside tmp/media/)
|
| 576 |
+
# scene_filename = Path(scene_py_path).name # only file name, not full path
|
| 577 |
+
# cmd = [*base, quality_flag, "-t", scene_filename, scene_class, "-o", out_name]
|
| 578 |
+
|
| 579 |
+
# print(f"[DEBUG] Command: {' '.join(cmd)}")
|
| 580 |
+
|
| 581 |
+
# # β
Run the process
|
| 582 |
+
# try:
|
| 583 |
+
# with log_file.open("ab") as lf:
|
| 584 |
+
# start = time.time()
|
| 585 |
+
# proc = subprocess.run(cmd, cwd=str(work_dir), stdout=lf, stderr=lf, timeout=timeout)
|
| 586 |
+
# elapsed = time.time() - start
|
| 587 |
+
# print(f"[DEBUG] Manim completed in {elapsed:.2f}s with code {proc.returncode}")
|
| 588 |
+
# if proc.returncode != 0:
|
| 589 |
+
# print("[ERROR] β Manim render failed.")
|
| 590 |
+
# return None
|
| 591 |
+
# except Exception as e:
|
| 592 |
+
# print(f"[ERROR] β Exception during manim run: {e}")
|
| 593 |
+
# return None
|
| 594 |
+
|
| 595 |
+
# # β
Search for rendered video files (.mp4 or .mov)
|
| 596 |
+
# print("[DEBUG] Scanning for video outputs...")
|
| 597 |
+
# search_dirs = [
|
| 598 |
+
# work_dir,
|
| 599 |
+
# work_dir / "media",
|
| 600 |
+
# work_dir / "media" / "videos"
|
| 601 |
+
# ]
|
| 602 |
+
|
| 603 |
+
# videos = []
|
| 604 |
+
# for folder in search_dirs:
|
| 605 |
+
# if folder.exists():
|
| 606 |
+
# found = list(folder.rglob("*.mp4")) + list(folder.rglob("*.mov"))
|
| 607 |
+
# videos.extend(found)
|
| 608 |
+
# print(f"[DEBUG] β {folder}: {len(found)} video(s) found")
|
| 609 |
+
|
| 610 |
+
# if not videos:
|
| 611 |
+
# print("[ERROR] β No video file found after scanning all media directories.")
|
| 612 |
+
# return None
|
| 613 |
+
|
| 614 |
+
# videos.sort(key=lambda p: p.stat().st_mtime, reverse=True)
|
| 615 |
+
# final_video = videos[0]
|
| 616 |
+
|
| 617 |
+
# print(f"[SUCCESS] β
Rendered video detected at: {final_video}")
|
| 618 |
+
|
| 619 |
+
# # β
Copy file back to work_dir (for consistent access)
|
| 620 |
+
# dest = work_dir / final_video.name
|
| 621 |
+
# if final_video != dest:
|
| 622 |
+
# try:
|
| 623 |
+
# shutil.copy2(final_video, dest)
|
| 624 |
+
# print(f"[DEBUG] Copied render result β {dest}")
|
| 625 |
+
# except Exception as e:
|
| 626 |
+
# print(f"[WARN] β οΈ Could not copy final video: {e}")
|
| 627 |
+
|
| 628 |
+
# print(f"[INFO] π Final render saved successfully at:\n {dest}")
|
| 629 |
+
# print("-" * 70)
|
| 630 |
+
# return dest
|
| 631 |
+
|
| 632 |
+
# def process_image_pipeline(task_meta: Dict) -> Dict:
|
| 633 |
+
# print("\n================= PIPELINE START =================")
|
| 634 |
+
# print(f"[INFO] Task metadata received: {task_meta}")
|
| 635 |
+
# task_id = task_meta.get("task_id")
|
| 636 |
+
# task_dir = Path(task_meta.get("task_dir", ""))
|
| 637 |
+
# if not task_id or not task_dir.exists():
|
| 638 |
+
# print("[ERROR] Invalid task: Missing task_id or task_dir.")
|
| 639 |
+
# return {"success": False, "error": "invalid-task"}
|
| 640 |
+
|
| 641 |
+
# working = task_dir / "working"
|
| 642 |
+
# outputs = task_dir / "outputs"
|
| 643 |
+
# logs = task_dir / "logs"
|
| 644 |
+
# working.mkdir(parents=True, exist_ok=True)
|
| 645 |
+
# outputs.mkdir(parents=True, exist_ok=True)
|
| 646 |
+
# logs.mkdir(parents=True, exist_ok=True)
|
| 647 |
+
# print(f"[DEBUG] Working directories prepared under {task_dir}")
|
| 648 |
+
|
| 649 |
+
# log_file = logs / f"{task_id}.log"
|
| 650 |
+
|
| 651 |
+
# try:
|
| 652 |
+
# input_image = Path(task_meta.get("input_image"))
|
| 653 |
+
# if not input_image.exists():
|
| 654 |
+
# print(f"[ERROR] Input image missing: {input_image}")
|
| 655 |
+
# return {"success": False, "error": "input-missing"}
|
| 656 |
+
|
| 657 |
+
# safe_input = working / input_image.name
|
| 658 |
+
# shutil.copy2(input_image, safe_input)
|
| 659 |
+
# print(f"[DEBUG] Input image copied to working directory: {safe_input}")
|
| 660 |
+
|
| 661 |
+
# svg_path = working / f"{task_id}.svg"
|
| 662 |
+
# print("[DEBUG] Starting vectorization...")
|
| 663 |
+
|
| 664 |
+
# vec_res = vectorize_image(safe_input, options={"quality": task_meta.get("quality")})
|
| 665 |
+
# print(f"[DEBUG] Vectorization result: {vec_res.keys()}")
|
| 666 |
+
|
| 667 |
+
# if not vec_res.get("success"):
|
| 668 |
+
# print(f"[ERROR] Vectorization failed: {vec_res.get('error')}")
|
| 669 |
+
# return {"success": False, "error": f"vectorize-failed:{vec_res.get('error')}"}
|
| 670 |
+
|
| 671 |
+
# svg_content = vec_res.get("svg")
|
| 672 |
+
# if not svg_content:
|
| 673 |
+
# print("[ERROR] Vectorizer returned empty SVG content.")
|
| 674 |
+
# return {"success": False, "error": "vectorizer-did-not-return-svg"}
|
| 675 |
+
|
| 676 |
+
# svg_path.write_text(svg_content, encoding="utf-8")
|
| 677 |
+
# print(f"[DEBUG] SVG written to: {svg_path}")
|
| 678 |
+
|
| 679 |
+
# style = task_meta.get("style", "fade-in")
|
| 680 |
+
# anim_fn = STYLE_MAP.get(style, text_animations.fade_in)
|
| 681 |
+
# print(f"[DEBUG] Selected animation style: {style}")
|
| 682 |
+
|
| 683 |
+
# scene_info = anim_fn(svg_path, working, task_meta)
|
| 684 |
+
# if not scene_info or "scene_name" not in scene_info or "scene_code" not in scene_info:
|
| 685 |
+
# print("[ERROR] Invalid animation function result.")
|
| 686 |
+
# return {"success": False, "error": "animation-fn-invalid"}
|
| 687 |
+
|
| 688 |
+
# scene_name = scene_info["scene_name"]
|
| 689 |
+
# scene_code = scene_info["scene_code"]
|
| 690 |
+
|
| 691 |
+
# scene_py = working / f"scene_{task_id}.py"
|
| 692 |
+
# scene_py.write_text(scene_code, encoding="utf-8")
|
| 693 |
+
# print(f"[DEBUG] Scene file written: {scene_py}")
|
| 694 |
+
|
| 695 |
+
# try:
|
| 696 |
+
# shutil.copy2(svg_path, working / svg_path.name)
|
| 697 |
+
# except Exception as e:
|
| 698 |
+
# print(f"[WARN] Could not copy SVG: {e}")
|
| 699 |
+
|
| 700 |
+
# q = task_meta.get("quality", "preview")
|
| 701 |
+
# quality_flag = "-ql" if q == "preview" else "-qh" if q == "final" else "-qm"
|
| 702 |
+
# print(f"[DEBUG] Render quality flag: {quality_flag}")
|
| 703 |
+
|
| 704 |
+
# rendered_mp4 = _run_manim(scene_py, scene_name, working, log_file, quality_flag=quality_flag, timeout=900)
|
| 705 |
+
# if not rendered_mp4:
|
| 706 |
+
# print("[ERROR] Manim render failed.")
|
| 707 |
+
# return {"success": False, "error": "manim-render-failed", "log": str(log_file)}
|
| 708 |
+
|
| 709 |
+
# dest = GLOBAL_OUTPUTS_DIR / f"{task_id}_{rendered_mp4.name}"
|
| 710 |
+
# try:
|
| 711 |
+
# if dest.exists():
|
| 712 |
+
# dest.unlink()
|
| 713 |
+
# shutil.copy2(rendered_mp4, dest)
|
| 714 |
+
# print(f"[DEBUG] Final output copied to global outputs dir: {dest}")
|
| 715 |
+
# except Exception as e:
|
| 716 |
+
# print(f"[ERROR] Failed to copy output: {e}")
|
| 717 |
+
# return {"success": False, "error": f"copy-failed:{e}", "log": str(log_file)}
|
| 718 |
+
|
| 719 |
+
# try:
|
| 720 |
+
# shutil.rmtree(working)
|
| 721 |
+
# print("[DEBUG] Cleaned up working directory.")
|
| 722 |
+
# except Exception as e:
|
| 723 |
+
# print(f"[WARN] Could not remove working directory: {e}")
|
| 724 |
+
|
| 725 |
+
# print(f"[SUCCESS] Task {task_id} completed successfully.")
|
| 726 |
+
# print("================= PIPELINE END =================\n")
|
| 727 |
+
# return {"success": True, "output_path": str(dest), "log": str(log_file),"output_bytes": dest.read_bytes(),}
|
| 728 |
+
|
| 729 |
+
# except Exception as e:
|
| 730 |
+
# print(f"[ERROR] Unexpected exception in pipeline: {e}")
|
| 731 |
+
# print("================= PIPELINE FAILED =================\n")
|
| 732 |
+
# return {"success": False, "error": str(e), "log": str(log_file)}
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
|
| 737 |
+
# app/core/pipeline.py
|
| 738 |
+
|
| 739 |
+
import sys
|
| 740 |
+
import shutil
|
| 741 |
+
import subprocess
|
| 742 |
+
import time
|
| 743 |
+
from pathlib import Path
|
| 744 |
+
from typing import Dict, Optional
|
| 745 |
+
from core.vectorizer import vectorize_image
|
| 746 |
+
# type: ignore
|
| 747 |
+
from moviepy.video.io.VideoFileClip import VideoFileClip
|
| 748 |
+
# Animation modules
|
| 749 |
+
import core.text_animations as text_animations
|
| 750 |
+
import core.logo_animations as logo_animations
|
| 751 |
+
import core.shape_animations as shape_animations
|
| 752 |
+
import core.fx_animations as fx_animations
|
| 753 |
+
import core.transitions as transitions
|
| 754 |
+
import core.infographic_animations as infographic_animations
|
| 755 |
+
import core.character_animations as character_animations
|
| 756 |
+
import core.background_animations as background_animations
|
| 757 |
+
import core.overlay_animations as overlay_animations
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
# === GLOBAL OUTPUT DIR ===
|
| 761 |
+
TMP_DIR = Path("tmp")
|
| 762 |
+
TMP_DIR.mkdir(exist_ok=True)
|
| 763 |
+
GLOBAL_OUTPUTS_DIR = TMP_DIR / "outputs"
|
| 764 |
+
GLOBAL_OUTPUTS_DIR.mkdir(parents=True, exist_ok=True)
|
| 765 |
+
|
| 766 |
+
# === STYLE MAP ===
|
| 767 |
+
STYLE_MAP = {
|
| 768 |
+
"fade-in": text_animations.fade_in,
|
| 769 |
+
"slide-in-left": text_animations.slide_in_left,
|
| 770 |
+
"pop-bounce": text_animations.pop_bounce,
|
| 771 |
+
"zoom-in": text_animations.zoom_in,
|
| 772 |
+
"typewriter": text_animations.typewriter,
|
| 773 |
+
"wipe": text_animations.wipe_mask,
|
| 774 |
+
"flip": text_animations.flip_rotate,
|
| 775 |
+
"blur-in": text_animations.blur_in,
|
| 776 |
+
"scale-up": text_animations.scale_up,
|
| 777 |
+
"neon-glow": text_animations.neon_glow,
|
| 778 |
+
"gradient-fill": text_animations.gradient_fill,
|
| 779 |
+
"wave-ripple": text_animations.wave_ripple,
|
| 780 |
+
"split-text": text_animations.split_text,
|
| 781 |
+
"logo-build": logo_animations.logo_build_lines,
|
| 782 |
+
"logo-particle": logo_animations.logo_fade_particle,
|
| 783 |
+
"logo-spin": logo_animations.logo_spin_scale,
|
| 784 |
+
"logo-stroke": logo_animations.logo_stroke_draw,
|
| 785 |
+
"logo-glitch": logo_animations.logo_glitch,
|
| 786 |
+
"line-draw": shape_animations.line_draw,
|
| 787 |
+
"shape-morph": shape_animations.shape_morph,
|
| 788 |
+
"grow-center": shape_animations.grow_center,
|
| 789 |
+
"floating": shape_animations.floating_bounce,
|
| 790 |
+
"particle-burst": fx_animations.particle_burst,
|
| 791 |
+
"smoke": fx_animations.smoke_effect,
|
| 792 |
+
"light-rays": fx_animations.light_rays,
|
| 793 |
+
"fade-transition": transitions.fade_transition,
|
| 794 |
+
"bar-chart": infographic_animations.bar_chart_grow,
|
| 795 |
+
"count-number": infographic_animations.number_count,
|
| 796 |
+
"walk-cycle": character_animations.walk_cycle,
|
| 797 |
+
"bobbing": character_animations.bobbing,
|
| 798 |
+
"animated-gradient": background_animations.animated_gradient,
|
| 799 |
+
"particle-motion": background_animations.particle_motion,
|
| 800 |
+
"confetti": overlay_animations.confetti,
|
| 801 |
+
"checkmark": overlay_animations.checkmark_tick,
|
| 802 |
+
}
|
| 803 |
+
|
| 804 |
+
|
| 805 |
+
# # === MANIM RUNNER ===
|
| 806 |
+
# def _run_manim(scene_py_path: Path, scene_class: str, quality_flag: str = "-ql", timeout: int = 300) -> Optional[Path]:
|
| 807 |
+
# """Run Manim and return the rendered video path."""
|
| 808 |
+
# print(f"\n㪠Running Manim for scene: {scene_class}")
|
| 809 |
+
|
| 810 |
+
# cmd = [
|
| 811 |
+
# sys.executable,
|
| 812 |
+
# "-m",
|
| 813 |
+
# "manim",
|
| 814 |
+
# quality_flag,
|
| 815 |
+
# "-t",
|
| 816 |
+
# scene_py_path.name,
|
| 817 |
+
# scene_class,
|
| 818 |
+
# "-o",
|
| 819 |
+
# f"{scene_class}_output",
|
| 820 |
+
# ]
|
| 821 |
+
|
| 822 |
+
# print(f"β Command: {' '.join(cmd)}")
|
| 823 |
+
|
| 824 |
+
# try:
|
| 825 |
+
# subprocess.run(cmd, cwd=scene_py_path.parent, check=True, timeout=timeout)
|
| 826 |
+
# except subprocess.CalledProcessError:
|
| 827 |
+
# print("β Manim render failed.")
|
| 828 |
+
# return None
|
| 829 |
+
# except Exception as e:
|
| 830 |
+
# print(f"β Exception: {e}")
|
| 831 |
+
# return None
|
| 832 |
+
|
| 833 |
+
# # Search output
|
| 834 |
+
# rendered = list(scene_py_path.parent.rglob("*.mp4")) + list(scene_py_path.parent.rglob("*.mov"))
|
| 835 |
+
# if not rendered:
|
| 836 |
+
# print("β No output file found after render.")
|
| 837 |
+
# return None
|
| 838 |
+
|
| 839 |
+
# rendered.sort(key=lambda p: p.stat().st_mtime, reverse=True)
|
| 840 |
+
# output_file = rendered[0]
|
| 841 |
+
# print(f"β
Rendered file: {output_file}")
|
| 842 |
+
# return output_file
|
| 843 |
+
|
| 844 |
+
|
| 845 |
+
# # === PIPELINE ===
|
| 846 |
+
# def process_image_pipeline(task_meta: Dict) -> Dict:
|
| 847 |
+
# """Simplified image β SVG β Manim video pipeline."""
|
| 848 |
+
# print("\n================= PIPELINE START =================")
|
| 849 |
+
# print(f"Metadata: {task_meta}")
|
| 850 |
+
|
| 851 |
+
# try:
|
| 852 |
+
# task_id = task_meta.get("task_id", "no_id")
|
| 853 |
+
# input_image = Path(task_meta.get("input_image", ""))
|
| 854 |
+
# if not input_image.exists():
|
| 855 |
+
# print(f"β Input image not found: {input_image}")
|
| 856 |
+
# return {"success": False, "error": "input-not-found"}
|
| 857 |
+
|
| 858 |
+
# style = task_meta.get("style", "fade-in")
|
| 859 |
+
# quality = task_meta.get("quality", "preview")
|
| 860 |
+
|
| 861 |
+
# # === Work directly inside tmp/ ===
|
| 862 |
+
# work_dir = TMP_DIR / f"{task_id}"
|
| 863 |
+
# work_dir.mkdir(exist_ok=True)
|
| 864 |
+
|
| 865 |
+
# safe_input = work_dir / input_image.name
|
| 866 |
+
# shutil.copy2(input_image, safe_input)
|
| 867 |
+
|
| 868 |
+
# print(f"πΌοΈ Copied input β {safe_input}")
|
| 869 |
+
|
| 870 |
+
# # === Vectorize ===
|
| 871 |
+
# vec_res = vectorize_image(safe_input, options={"quality": quality})
|
| 872 |
+
# if not vec_res.get("success"):
|
| 873 |
+
# print("β Vectorization failed.")
|
| 874 |
+
# return {"success": False, "error": vec_res.get("error")}
|
| 875 |
+
|
| 876 |
+
# svg_path = work_dir / f"{task_id}.svg"
|
| 877 |
+
# svg_path.write_text(vec_res.get("svg", ""), encoding="utf-8")
|
| 878 |
+
# print(f"π§© SVG written β {svg_path}")
|
| 879 |
+
|
| 880 |
+
# # === Animation ===
|
| 881 |
+
# anim_fn = STYLE_MAP.get(style, text_animations.fade_in)
|
| 882 |
+
# scene_info = anim_fn(svg_path, work_dir, task_meta)
|
| 883 |
+
|
| 884 |
+
# scene_name = scene_info["scene_name"]
|
| 885 |
+
# scene_code = scene_info["scene_code"]
|
| 886 |
+
|
| 887 |
+
# scene_py = work_dir / f"{scene_name}.py"
|
| 888 |
+
# scene_py.write_text(scene_code, encoding="utf-8")
|
| 889 |
+
# print(f"ποΈ Scene file written β {scene_py}")
|
| 890 |
+
|
| 891 |
+
# quality_flag = "-ql" if quality == "preview" else "-qh" if quality == "final" else "-qm"
|
| 892 |
+
# output_file = _run_manim(scene_py, scene_name, quality_flag=quality_flag)
|
| 893 |
+
|
| 894 |
+
# if not output_file:
|
| 895 |
+
# return {"success": False, "error": "render-failed"}
|
| 896 |
+
|
| 897 |
+
# final_output = GLOBAL_OUTPUTS_DIR / f"{task_id}.mp4"
|
| 898 |
+
|
| 899 |
+
# shutil.copy2(output_file, final_output)
|
| 900 |
+
# print(f"π¦ Final output copied β {final_output}")
|
| 901 |
+
|
| 902 |
+
# print("================= PIPELINE END =================\n")
|
| 903 |
+
# return {
|
| 904 |
+
# "success": True,
|
| 905 |
+
# "output_path": str(final_output),
|
| 906 |
+
# "output_bytes": final_output.read_bytes(),
|
| 907 |
+
# }
|
| 908 |
+
|
| 909 |
+
# except Exception as e:
|
| 910 |
+
# print(f"β Exception in pipeline: {e}")
|
| 911 |
+
# print("================= PIPELINE FAILED =================\n")
|
| 912 |
+
# return {"success": False, "error": str(e)}
|
| 913 |
+
|
| 914 |
+
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
# === MANIM RUNNER ===
|
| 918 |
+
def _run_manim(scene_py_path: Path, scene_class: str, quality_flag: str = "-ql", timeout: int = 300) -> Optional[Path]:
|
| 919 |
+
"""Run Manim and return the rendered video path."""
|
| 920 |
+
print(f"\n㪠Running Manim for scene: {scene_class}")
|
| 921 |
+
|
| 922 |
+
cmd = [
|
| 923 |
+
sys.executable,
|
| 924 |
+
"-m",
|
| 925 |
+
|
| 926 |
+
"manim",
|
| 927 |
+
quality_flag,
|
| 928 |
+
"--transparent",
|
| 929 |
+
|
| 930 |
+
"-t",
|
| 931 |
+
scene_py_path.name,
|
| 932 |
+
scene_class,
|
| 933 |
+
"-o",
|
| 934 |
+
f"{scene_class}_output",
|
| 935 |
+
]
|
| 936 |
+
|
| 937 |
+
print(f"β Command: {' '.join(cmd)}")
|
| 938 |
+
|
| 939 |
+
try:
|
| 940 |
+
subprocess.run(cmd, cwd=scene_py_path.parent, check=True, timeout=timeout)
|
| 941 |
+
except subprocess.CalledProcessError:
|
| 942 |
+
print("β Manim render failed.")
|
| 943 |
+
return None
|
| 944 |
+
except Exception as e:
|
| 945 |
+
print(f"β Exception: {e}")
|
| 946 |
+
return None
|
| 947 |
+
|
| 948 |
+
# Search for rendered outputs
|
| 949 |
+
rendered = list(scene_py_path.parent.rglob("*.mp4")) + list(scene_py_path.parent.rglob("*.mov"))
|
| 950 |
+
if not rendered:
|
| 951 |
+
print("β No output file found after render.")
|
| 952 |
+
return None
|
| 953 |
+
|
| 954 |
+
rendered.sort(key=lambda p: p.stat().st_mtime, reverse=True)
|
| 955 |
+
output_file = rendered[0]
|
| 956 |
+
print(f"β
Rendered file: {output_file}")
|
| 957 |
+
|
| 958 |
+
# π Convert MOV β MP4 if needed (safe cross-platform)
|
| 959 |
+
# if output_file.suffix.lower() == ".mov":
|
| 960 |
+
# try:
|
| 961 |
+
|
| 962 |
+
# converted = output_file.with_suffix(".mp4")
|
| 963 |
+
# print("ποΈ Converting .mov β .mp4 for compatibility...")
|
| 964 |
+
# clip = VideoFileClip(str(output_file))
|
| 965 |
+
# clip.write_videofile(str(converted), codec="libx264", audio_codec="aac")
|
| 966 |
+
# clip.close()
|
| 967 |
+
# output_file = converted
|
| 968 |
+
# except Exception as e:
|
| 969 |
+
# print(f"β οΈ MOVβMP4 conversion failed: {e}")
|
| 970 |
+
# return None
|
| 971 |
+
|
| 972 |
+
# if output_file.suffix.lower() == ".mov":
|
| 973 |
+
# try:
|
| 974 |
+
# converted = output_file.with_suffix(".webm")
|
| 975 |
+
# print("ποΈ Converting .mov β .webm (keeping transparency)...")
|
| 976 |
+
|
| 977 |
+
|
| 978 |
+
# cmd = [
|
| 979 |
+
# "ffmpeg",
|
| 980 |
+
# "-y",
|
| 981 |
+
# "-i", str(output_file),
|
| 982 |
+
# "-c:v", "libvpx-vp9",
|
| 983 |
+
# "-pix_fmt", "yuva420p", # β
keep alpha channel
|
| 984 |
+
# "-b:v", "4M",
|
| 985 |
+
# "-auto-alt-ref", "0",
|
| 986 |
+
# str(converted)
|
| 987 |
+
# ]
|
| 988 |
+
|
| 989 |
+
# subprocess.run(cmd, check=True)
|
| 990 |
+
# output_file1 = converted
|
| 991 |
+
# print(f"β
Converted successfully β {converted}")
|
| 992 |
+
|
| 993 |
+
# except Exception as e:
|
| 994 |
+
# print(f"β οΈ MOVβWEBM conversion failed: {e}")
|
| 995 |
+
# return None
|
| 996 |
+
|
| 997 |
+
return output_file
|
| 998 |
+
|
| 999 |
+
|
| 1000 |
+
# === PIPELINE ===
|
| 1001 |
+
def process_image_pipeline(task_meta: Dict) -> Dict:
|
| 1002 |
+
"""Simplified image β SVG β Manim video pipeline."""
|
| 1003 |
+
print("\n================= PIPELINE START =================")
|
| 1004 |
+
print(f"Metadata: {task_meta}")
|
| 1005 |
+
|
| 1006 |
+
try:
|
| 1007 |
+
task_id = task_meta.get("task_id", "no_id")
|
| 1008 |
+
input_image = Path(task_meta.get("input_image", ""))
|
| 1009 |
+
if not input_image.exists():
|
| 1010 |
+
print(f"β Input image not found: {input_image}")
|
| 1011 |
+
return {"success": False, "error": "input-not-found"}
|
| 1012 |
+
|
| 1013 |
+
style = task_meta.get("style", "fade-in")
|
| 1014 |
+
quality = task_meta.get("quality", "final")
|
| 1015 |
+
|
| 1016 |
+
# === Work directly inside tmp/ ===
|
| 1017 |
+
work_dir = TMP_DIR / f"{task_id}"
|
| 1018 |
+
work_dir.mkdir(exist_ok=True)
|
| 1019 |
+
|
| 1020 |
+
safe_input = work_dir / input_image.name
|
| 1021 |
+
shutil.copy2(input_image, safe_input)
|
| 1022 |
+
print(f"πΌοΈ Copied input β {safe_input}")
|
| 1023 |
+
|
| 1024 |
+
# === Vectorize ===
|
| 1025 |
+
vec_res = vectorize_image(safe_input, options={"quality": quality})
|
| 1026 |
+
if not vec_res.get("success"):
|
| 1027 |
+
print("β Vectorization failed.")
|
| 1028 |
+
return {"success": False, "error": vec_res.get("error")}
|
| 1029 |
+
|
| 1030 |
+
svg_path = work_dir / f"{task_id}.svg"
|
| 1031 |
+
svg_path.write_text(vec_res.get("svg", ""), encoding="utf-8")
|
| 1032 |
+
print(f"π§© SVG written β {svg_path}")
|
| 1033 |
+
|
| 1034 |
+
# === Animation ===
|
| 1035 |
+
anim_fn = STYLE_MAP.get(style, text_animations.fade_in)
|
| 1036 |
+
scene_info = anim_fn(svg_path, work_dir, task_meta)
|
| 1037 |
+
|
| 1038 |
+
scene_name = scene_info["scene_name"]
|
| 1039 |
+
scene_code = scene_info["scene_code"]
|
| 1040 |
+
|
| 1041 |
+
scene_py = work_dir / f"{scene_name}.py"
|
| 1042 |
+
scene_py.write_text(scene_code, encoding="utf-8")
|
| 1043 |
+
print(f"ποΈ Scene file written β {scene_py}")
|
| 1044 |
+
|
| 1045 |
+
# === Run Manim ===
|
| 1046 |
+
quality_flag = "-ql" if quality == "preview" else "-qh" if quality == "final" else "-qm"
|
| 1047 |
+
output_file = _run_manim(scene_py, scene_name, quality_flag=quality_flag)
|
| 1048 |
+
|
| 1049 |
+
if not output_file:
|
| 1050 |
+
print("β Render failed, no output returned.")
|
| 1051 |
+
return {"success": False, "error": "render-failed"}
|
| 1052 |
+
|
| 1053 |
+
# === Copy Final Output ===
|
| 1054 |
+
final_output = GLOBAL_OUTPUTS_DIR / f"{task_id}.mov"
|
| 1055 |
+
shutil.copy2(output_file, final_output)
|
| 1056 |
+
print(f"π¦ Final output copied β {final_output}")
|
| 1057 |
+
|
| 1058 |
+
print("================= PIPELINE END =================\n")
|
| 1059 |
+
|
| 1060 |
+
# === Return result in-memory ===
|
| 1061 |
+
return {
|
| 1062 |
+
"success": True,
|
| 1063 |
+
"output_path": str(final_output),
|
| 1064 |
+
# "output_bytes": final_output.read_bytes(),
|
| 1065 |
+
}
|
| 1066 |
+
|
| 1067 |
+
except Exception as e:
|
| 1068 |
+
print(f"β Exception in pipeline: {e}")
|
| 1069 |
+
print("================= PIPELINE FAILED =================\n")
|
| 1070 |
+
return {"success": False, "error": str(e)}
|
core/shape_animations.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/core/animations/shape_animations.py
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Dict
|
| 4 |
+
|
| 5 |
+
def line_draw(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
|
| 6 |
+
scene_name = "LineDraw"
|
| 7 |
+
scene_code = f'''
|
| 8 |
+
from manim import *
|
| 9 |
+
class {scene_name}(Scene):
|
| 10 |
+
def construct(self):
|
| 11 |
+
# TODO: Draw strokes of shapes (convert svg paths to strokes)
|
| 12 |
+
svg = SVGMobject("{svg_path.name}")
|
| 13 |
+
self.play(Write(svg), run_time=1)
|
| 14 |
+
self.wait(0.5)
|
| 15 |
+
'''
|
| 16 |
+
return {"scene_name": scene_name, "scene_code": scene_code}
|
| 17 |
+
|
| 18 |
+
def shape_morph(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
|
| 19 |
+
scene_name = "ShapeMorph"
|
| 20 |
+
scene_code = f'''
|
| 21 |
+
from manim import *
|
| 22 |
+
class {scene_name}(Scene):
|
| 23 |
+
def construct(self):
|
| 24 |
+
# TODO: Morph shapes β will require matching points
|
| 25 |
+
svg = SVGMobject("{svg_path.name}")
|
| 26 |
+
self.play(FadeIn(svg), run_time=0.8)
|
| 27 |
+
self.wait(0.5)
|
| 28 |
+
'''
|
| 29 |
+
return {"scene_name": scene_name, "scene_code": scene_code}
|
| 30 |
+
|
| 31 |
+
def grow_center(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
|
| 32 |
+
scene_name = "GrowFromCenter"
|
| 33 |
+
scene_code = f'''
|
| 34 |
+
from manim import *
|
| 35 |
+
class {scene_name}(Scene):
|
| 36 |
+
def construct(self):
|
| 37 |
+
svg = SVGMobject("{svg_path.name}").scale(0.2)
|
| 38 |
+
self.play(svg.animate.scale(5), run_time=1)
|
| 39 |
+
self.wait(0.5)
|
| 40 |
+
'''
|
| 41 |
+
return {"scene_name": scene_name, "scene_code": scene_code}
|
| 42 |
+
|
| 43 |
+
def floating_bounce(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
|
| 44 |
+
scene_name = "FloatingBounce"
|
| 45 |
+
scene_code = f'''
|
| 46 |
+
from manim import *
|
| 47 |
+
class {scene_name}(Scene):
|
| 48 |
+
def construct(self):
|
| 49 |
+
svg = SVGMobject("{svg_path.name}")
|
| 50 |
+
self.play(svg.animate.shift(UP*0.3), run_time=0.6)
|
| 51 |
+
self.play(svg.animate.shift(DOWN*0.3), run_time=0.6)
|
| 52 |
+
self.wait(0.5)
|
| 53 |
+
'''
|
| 54 |
+
return {"scene_name": scene_name, "scene_code": scene_code}
|
core/text_animations.py
ADDED
|
@@ -0,0 +1,410 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/core/animations/text_animations.py
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Dict
|
| 4 |
+
|
| 5 |
+
# Each function returns: {"scene_name": "SceneClass", "scene_code": "<python source>"}
|
| 6 |
+
# The scene_code should import manim and define the requested Scene subclass. Pipeline will write it to disk.
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def fade_in(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
|
| 12 |
+
"""
|
| 13 |
+
Premium Cinematic Fade-In (edge-safe, high contrast version).
|
| 14 |
+
Glows in softly, with depth and smooth easing β looks natural,
|
| 15 |
+
vivid, and perfect for logos or text reveals.
|
| 16 |
+
"""
|
| 17 |
+
scene_name = "FadeInScene"
|
| 18 |
+
svg_name = svg_path.name
|
| 19 |
+
|
| 20 |
+
scene_code = f"""
|
| 21 |
+
from manim import *
|
| 22 |
+
|
| 23 |
+
class {scene_name}(Scene):
|
| 24 |
+
def construct(self):
|
| 25 |
+
# Transparent background
|
| 26 |
+
self.camera.background_color = None
|
| 27 |
+
|
| 28 |
+
# Load SVG
|
| 29 |
+
svg = SVGMobject(r"{svg_name}")
|
| 30 |
+
svg.set(width=6)
|
| 31 |
+
svg.set_stroke(width=0)
|
| 32 |
+
svg.set_fill(opacity=1)
|
| 33 |
+
|
| 34 |
+
# π Create glow aura (simulated blur via scaling layers)
|
| 35 |
+
glow_layers = VGroup(
|
| 36 |
+
*[
|
| 37 |
+
svg.copy()
|
| 38 |
+
.set_opacity(0.15 - 0.03 * i)
|
| 39 |
+
.scale(1.02 + i * 0.01)
|
| 40 |
+
.set_z_index(-1)
|
| 41 |
+
for i in range(4)
|
| 42 |
+
]
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
svg_group = VGroup(glow_layers, svg)
|
| 46 |
+
svg_group.move_to(ORIGIN)
|
| 47 |
+
svg_group.set_opacity(0)
|
| 48 |
+
|
| 49 |
+
self.add(svg_group)
|
| 50 |
+
|
| 51 |
+
# β¨ Dramatic fade-in with pop
|
| 52 |
+
self.play(
|
| 53 |
+
svg_group.animate
|
| 54 |
+
.scale(1.08)
|
| 55 |
+
.shift(UP * 0.15)
|
| 56 |
+
.set_opacity(1),
|
| 57 |
+
rate_func=lambda t: smooth(t) ** 0.7, # nonlinear ease
|
| 58 |
+
run_time=2.5
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
# π¬οΈ Subtle cinematic recoil
|
| 62 |
+
self.play(
|
| 63 |
+
svg_group.animate.scale(0.98).shift(DOWN * 0.05),
|
| 64 |
+
rate_func=there_and_back,
|
| 65 |
+
run_time=1.5
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
# Hold on final frame
|
| 69 |
+
self.wait(0.8)
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
return {"scene_name": scene_name, "scene_code": scene_code}
|
| 73 |
+
|
| 74 |
+
# def slide_in_left(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
|
| 75 |
+
# scene_name = "SlideInLeft"
|
| 76 |
+
# scene_code = f'''
|
| 77 |
+
# from manim import *
|
| 78 |
+
# class {scene_name}(Scene):
|
| 79 |
+
# def construct(self):
|
| 80 |
+
# svg = SVGMobject("{svg_path.name}")
|
| 81 |
+
# svg.set(width=6)
|
| 82 |
+
# svg.shift(LEFT*8)
|
| 83 |
+
# self.play(svg.animate.shift(RIGHT*8), run_time=1)
|
| 84 |
+
# self.wait(0.5)
|
| 85 |
+
# '''
|
| 86 |
+
# return {"scene_name": scene_name, "scene_code": scene_code}
|
| 87 |
+
def slide_in_left(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
|
| 88 |
+
scene_name = "SlideInLeft"
|
| 89 |
+
scene_code = f'''
|
| 90 |
+
from manim import *
|
| 91 |
+
|
| 92 |
+
class {scene_name}(Scene):
|
| 93 |
+
def construct(self):
|
| 94 |
+
# Load SVG with high quality
|
| 95 |
+
svg = SVGMobject("{svg_path.name}")
|
| 96 |
+
svg.set(width=6) # adjust size
|
| 97 |
+
svg.set_stroke(width=1.5) # make strokes visible but smooth
|
| 98 |
+
svg.set_fill(opacity=1) # ensure colors are fully visible
|
| 99 |
+
|
| 100 |
+
# Start off-screen left
|
| 101 |
+
svg.shift(LEFT*8)
|
| 102 |
+
|
| 103 |
+
# Slide in with smooth easing
|
| 104 |
+
self.play(
|
| 105 |
+
svg.animate.shift(RIGHT*8).set_opacity(1),
|
| 106 |
+
run_time=1.5,
|
| 107 |
+
rate_func=smooth
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
# Optional small bounce for natural feel
|
| 111 |
+
self.play(
|
| 112 |
+
svg.animate.shift(LEFT*0.2),
|
| 113 |
+
svg.animate.shift(RIGHT*0.2),
|
| 114 |
+
run_time=0.3,
|
| 115 |
+
rate_func=there_and_back
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
self.wait(0.5)
|
| 119 |
+
'''
|
| 120 |
+
return {"scene_name": scene_name, "scene_code": scene_code}
|

# def pop_bounce(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "PopBounce"
#     scene_code = f'''
# from manim import *
# class {scene_name}(Scene):
#     def construct(self):
#         svg = SVGMobject("{svg_path.name}")
#         svg.set(width=4)
#         self.play(svg.animate.scale(1.2), run_time=0.15)
#         self.play(svg.animate.scale(0.9), run_time=0.12)
#         self.play(svg.animate.scale(1.0), run_time=0.13)
#         self.wait(0.5)
# '''
#     return {"scene_name": scene_name, "scene_code": scene_code}
def pop_bounce(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "PopBounce"
    scene_code = f'''
from manim import *

class {scene_name}(Scene):
    def construct(self):
        # Load SVG with high quality
        svg = SVGMobject("{svg_path.name}")
        svg.set(width=4)
        svg.set_stroke(width=1.5)
        svg.set_fill(opacity=1)

        # Optional: start slightly smaller and transparent for better pop effect
        svg.scale(0.8)
        svg.set_opacity(0)

        # Fade in while popping
        self.play(
            svg.animate.set_opacity(1).scale(1.25),
            run_time=0.15,
            rate_func=smooth
        )
        self.play(
            svg.animate.scale(0.95),
            run_time=0.12,
            rate_func=there_and_back
        )
        self.play(
            svg.animate.scale(1.0),
            run_time=0.13,
            rate_func=smooth
        )

        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def zoom_in(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "ZoomIn"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}").scale(0.2)
        self.add(svg)
        self.play(svg.animate.scale(5), run_time=1)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

# def typewriter(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "Typewriter"
#     scene_code = f'''
# from manim import *
# class {scene_name}(Scene):
#     def construct(self):
#         # For text-based typewriter you might want to use Text/MarkupText
#         txt = Text("Typewriter placeholder")
#         self.play(Write(txt), run_time=1.5)
#         self.wait(0.5)
# '''
#     return {"scene_name": scene_name, "scene_code": scene_code}
# def typewriter(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "Typewriter"
#     scene_code = f'''
# from manim import *

# class {scene_name}(Scene):
#     def construct(self):
#         # Load the uploaded SVG
#         svg = SVGMobject("{svg_path.name}")
#         svg.set(width=6)
#         svg.set_stroke(width=2)
#         svg.set_fill(opacity=1)
#         svg.move_to(ORIGIN)

#         # Break into submobjects to animate each path like typewriter strokes
#         for sub in svg:
#             sub.set_opacity(0)  # start invisible

#         # Typewriter-style progressive drawing
#         for sub in svg:
#             sub.set_opacity(1)
#             self.play(Create(sub), run_time=0.3, rate_func=smooth)

#         self.wait(0.5)
# '''
#     return {"scene_name": scene_name, "scene_code": scene_code}
# def typewriter(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
#     scene_name = "Typewriter"
#     scene_code = f'''
# from manim import *

# class {scene_name}(Scene):
#     def construct(self):
#         # Load the uploaded SVG
#         svg = SVGMobject("{svg_path.name}")
#         svg.set(width=6)
#         svg.set_stroke(width=2)
#         svg.set_fill(opacity=1)
#         svg.move_to(ORIGIN)

#         # Start invisible
#         for sub in svg:
#             sub.set_opacity(0)
#             sub.scale(0.95)  # start slightly smaller for cinematic pop

#         # Draw each path progressively
#         for sub in svg:
#             sub.set_opacity(1)
#             self.play(
#                 Create(sub),
#                 sub.animate.scale(1.05),  # subtle overshoot
#                 run_time=0.35,
#                 rate_func=smooth
#             )
#             # Scale back smoothly
#             self.play(sub.animate.scale(0.95), run_time=0.1, rate_func=there_and_back)

#         # Optional: add a blinking cursor at the end
#         cursor = Line(start=svg.get_right(), end=svg.get_right() + UP*0.5, stroke_width=2)
#         self.add(cursor)
#         for _ in range(6):
#             self.play(cursor.animate.set_opacity(0), run_time=0.3)
#             self.play(cursor.animate.set_opacity(1), run_time=0.3)

#         self.wait(0.5)
# '''
#     return {"scene_name": scene_name, "scene_code": scene_code}
def typewriter(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "Typewriter"
    scene_code = f'''
from manim import *

class {scene_name}(Scene):
    def construct(self):
        # Load the uploaded SVG
        svg = SVGMobject("{svg_path.name}")
        svg.set(width=6)
        svg.set_stroke(width=2)
        svg.set_fill(opacity=1)
        svg.move_to(ORIGIN)

        # Start almost invisible for smooth fade-in
        for sub in svg:
            sub.set_opacity(0.05)
            sub.scale(0.98)  # subtle initial scale for cinematic feel

        # Animate each path like a cinematic typewriter
        for sub in svg:
            self.play(
                Create(sub),
                sub.animate.set_opacity(1).scale(1.0),  # fade in while drawing
                run_time=0.35,
                rate_func=smooth
            )

        # Optional: add a subtle blinking cursor at the end
        cursor = Line(start=svg.get_right(), end=svg.get_right() + UP*0.5, stroke_width=2)
        self.add(cursor)
        for _ in range(6):
            self.play(cursor.animate.set_opacity(0), run_time=0.3, rate_func=smooth)
            self.play(cursor.animate.set_opacity(1), run_time=0.3, rate_func=smooth)

        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def wipe_mask(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "WipeMask"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        bar = Rectangle(width=8, height=6).set_fill(BLACK, opacity=1).to_edge(LEFT)
        self.add(bar)
        self.play(bar.animate.shift(RIGHT*8), run_time=1)
        self.add(svg)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def flip_rotate(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "FlipRotate"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        self.play(Rotate(svg, angle=PI), run_time=0.8)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def blur_in(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "BlurIn"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        # TODO: Add blur-like effect (manim's blur is limited)
        self.play(FadeIn(svg), run_time=0.8)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def scale_up(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "ScaleUp"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        svg.scale(0.2)
        self.play(svg.animate.scale(5), run_time=1)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

# --- Premium / stylish placeholders ---
def neon_glow(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "NeonGlow"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        svg = SVGMobject("{svg_path.name}")
        # TODO: Implement neon glow using strokes, duplications, and blurs
        self.play(FadeIn(svg), run_time=0.8)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def gradient_fill(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "GradientFill"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: Add gradient fill by generating shapes or using shaders
        svg = SVGMobject("{svg_path.name}")
        self.play(FadeIn(svg), run_time=0.8)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def wave_ripple(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "WaveRipple"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: Animate vertices or use transforms for wave effect
        svg = SVGMobject("{svg_path.name}")
        self.play(FadeIn(svg), run_time=0.8)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

# Add more premium functions (split_text, stroke_draw, brush_reveal, liquid, 3d_rotate, sparkle)
def split_text(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "SplitTextReveal"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: Split text into letters and animate
        svg = SVGMobject("{svg_path.name}")
        self.play(FadeIn(svg), run_time=0.8)
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}
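
Each generator above returns a dict holding the scene class name and its Manim source as a string. A minimal sketch of how such a result could be written out and rendered (assuming the `manim` CLI is installed and the referenced SVG sits in `working_dir`; the helper name `render_scene` is illustrative and not part of the repository):

from pathlib import Path
from typing import Dict
import subprocess

def render_scene(result: Dict, working_dir: Path) -> Path:
    # Write the generated scene code next to the SVG it references.
    scene_file = working_dir / f"{result['scene_name']}.py"
    scene_file.write_text(result["scene_code"], encoding="utf-8")
    # Render with the manim CLI (-qm = medium quality); manim must be on PATH.
    subprocess.run(
        ["manim", "-qm", scene_file.name, result["scene_name"]],
        cwd=working_dir,
        check=True,
    )
    # Manim writes its output under <working_dir>/media by default.
    return working_dir / "media"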
core/transitions.py
ADDED
|
@@ -0,0 +1,26 @@
# app/core/animations/transitions.py
from pathlib import Path
from typing import Dict

def fade_transition(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "FadeTransition"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        bg = Rectangle(width=14, height=8).set_fill(BLACK, opacity=0)
        self.play(FadeIn(bg), run_time=0.5)
        self.wait(0.1)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}

def slide_transition(svg_path: Path, working_dir: Path, task_meta: Dict) -> Dict:
    scene_name = "SlideTransition"
    scene_code = f'''
from manim import *
class {scene_name}(Scene):
    def construct(self):
        # TODO: create in/out slides
        self.wait(0.5)
'''
    return {"scene_name": scene_name, "scene_code": scene_code}
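
slide_transition above is still a stub (`# TODO: create in/out slides`). One possible way to fill it in, sketched here under the assumption that a full-frame panel sweep is the desired effect (the class name and geometry are illustrative only, not part of the current code):

from manim import *

class SlideTransitionSketch(Scene):
    def construct(self):
        # A full-frame panel slides in from the left, covers the frame, then exits right.
        panel = Rectangle(width=15, height=9).set_fill(BLACK, opacity=1).set_stroke(width=0)
        panel.move_to(LEFT * 15)  # start fully off-screen
        self.play(panel.animate.move_to(ORIGIN), run_time=0.4, rate_func=smooth)
        self.play(panel.animate.move_to(RIGHT * 15), run_time=0.4, rate_func=smooth)
        self.wait(0.1)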
core/vectorizer.py
ADDED
|
@@ -0,0 +1,868 @@
| 1 |
+
# # app/core/vectorizer.py
|
| 2 |
+
# from pathlib import Path
|
| 3 |
+
# from typing import Dict
|
| 4 |
+
|
| 5 |
+
# def vectorize_image(input_raster_path: Path, output_svg_path: Path, options: Dict = None) -> Dict:
|
| 6 |
+
# """
|
| 7 |
+
# Simple, safe vectorizer stub.
|
| 8 |
+
# - Writes a minimal SVG to output_svg_path.
|
| 9 |
+
# - You should replace with your real vectorizer (potrace, autotrace, model, etc.)
|
| 10 |
+
# Returns: {"success": True, "svg_path": str(output_svg_path)} or {"success": False, "error": "..."}
|
| 11 |
+
# """
|
| 12 |
+
# options = options or {}
|
| 13 |
+
# try:
|
| 14 |
+
# input_raster_path = Path(input_raster_path)
|
| 15 |
+
# output_svg_path = Path(output_svg_path)
|
| 16 |
+
# output_svg_path.parent.mkdir(parents=True, exist_ok=True)
|
| 17 |
+
|
| 18 |
+
# if not input_raster_path.exists():
|
| 19 |
+
# return {"success": False, "error": "input-missing"}
|
| 20 |
+
|
| 21 |
+
# # Placeholder SVG (safe). Replace with actual vector output.
|
| 22 |
+
# sample_svg = f"""<svg xmlns="http://www.w3.org/2000/svg" width="600" height="200">
|
| 23 |
+
# <rect width="100%" height="100%" fill="transparent"/>
|
| 24 |
+
# <text x="10" y="40" font-family="Arial" font-size="28">Vector placeholder for {input_raster_path.name}</text>
|
| 25 |
+
# </svg>"""
|
| 26 |
+
# output_svg_path.write_text(sample_svg, encoding="utf-8")
|
| 27 |
+
# return {"success": True, "svg_path": str(output_svg_path)}
|
| 28 |
+
# except Exception as e:
|
| 29 |
+
# return {"success": False, "error": str(e)}
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# from pathlib import Path
|
| 36 |
+
# from typing import Dict, Optional
|
| 37 |
+
# import cv2
|
| 38 |
+
# import numpy as np
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# def vectorize_image(input_raster_path: Path, output_svg_path: Path = None, options: Optional[Dict] = None) -> Dict:
|
| 42 |
+
# """
|
| 43 |
+
# Production-ready in-memory vectorizer.
|
| 44 |
+
# Converts raster (PNG/JPG) β SVG (string, not saved).
|
| 45 |
+
|
| 46 |
+
# Args:
|
| 47 |
+
# input_raster_path (Path): Path to input raster image.
|
| 48 |
+
# output_svg_path (Path, optional): Ignored here, kept for compatibility.
|
| 49 |
+
# options (dict, optional): {
|
| 50 |
+
# "threshold": int (default=127),
|
| 51 |
+
# "simplify_tolerance": float (default=1.5),
|
| 52 |
+
# "quality": str (e.g., "low", "medium", "high") - optional
|
| 53 |
+
# }
|
| 54 |
+
|
| 55 |
+
# Returns:
|
| 56 |
+
# dict: {"success": True, "svg": "<svg>...</svg>", "width": w, "height": h}
|
| 57 |
+
# or {"success": False, "error": "..."}
|
| 58 |
+
# """
|
| 59 |
+
# options = options or {}
|
| 60 |
+
# threshold_value = options.get("threshold", 127)
|
| 61 |
+
# simplify_tolerance = options.get("simplify_tolerance", 1.5)
|
| 62 |
+
|
| 63 |
+
# try:
|
| 64 |
+
# input_raster_path = Path(input_raster_path)
|
| 65 |
+
# if not input_raster_path.exists():
|
| 66 |
+
# return {"success": False, "error": "input file not found"}
|
| 67 |
+
|
| 68 |
+
# # ---- Load image ----
|
| 69 |
+
# img = cv2.imread(str(input_raster_path))
|
| 70 |
+
# if img is None:
|
| 71 |
+
# return {"success": False, "error": "invalid or unreadable image"}
|
| 72 |
+
|
| 73 |
+
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
| 74 |
+
# _, thresh = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_BINARY_INV)
|
| 75 |
+
|
| 76 |
+
# # ---- Find contours ----
|
| 77 |
+
# contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 78 |
+
# h, w = gray.shape[:2]
|
| 79 |
+
|
| 80 |
+
# # ---- Build SVG in-memory ----
|
| 81 |
+
# svg_header = f'<svg xmlns="http://www.w3.org/2000/svg" width="{w}" height="{h}" viewBox="0 0 {w} {h}">'
|
| 82 |
+
# svg_paths = []
|
| 83 |
+
|
| 84 |
+
# for contour in contours:
|
| 85 |
+
# contour = cv2.approxPolyDP(contour, simplify_tolerance, True)
|
| 86 |
+
# if contour.shape[0] < 2:
|
| 87 |
+
# continue
|
| 88 |
+
# path_data = "M " + " L ".join(f"{int(x)} {int(y)}" for x, y in contour[:, 0, :]) + " Z"
|
| 89 |
+
# svg_paths.append(f'<path d="{path_data}" fill="none" stroke="black" stroke-width="1"/>')
|
| 90 |
+
|
| 91 |
+
# svg_content = svg_header + "".join(svg_paths) + "</svg>"
|
| 92 |
+
|
| 93 |
+
# # ---- Return result ----
|
| 94 |
+
# return {
|
| 95 |
+
# "success": True,
|
| 96 |
+
# "svg": svg_content,
|
| 97 |
+
# "width": w,
|
| 98 |
+
# "height": h
|
| 99 |
+
# }
|
| 100 |
+
|
| 101 |
+
# except Exception as e:
|
| 102 |
+
# return {"success": False, "error": str(e)}
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# from pathlib import Path
|
| 106 |
+
# from typing import Dict, Optional
|
| 107 |
+
# import cv2
|
| 108 |
+
# import numpy as np
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# def vectorize_image(input_raster_path: Path, output_svg_path: Path = None, options: Optional[Dict] = None) -> Dict:
|
| 112 |
+
# """
|
| 113 |
+
# Production-ready in-memory vectorizer.
|
| 114 |
+
# Converts raster (PNG/JPG) β SVG (string, not saved).
|
| 115 |
+
|
| 116 |
+
# Args:
|
| 117 |
+
# input_raster_path (Path): Path to input raster image.
|
| 118 |
+
# output_svg_path (Path, optional): Ignored here, kept for compatibility.
|
| 119 |
+
# options (dict, optional): {
|
| 120 |
+
# "threshold": int (default=127),
|
| 121 |
+
# "simplify_tolerance": float (default=1.5),
|
| 122 |
+
# "quality": str (e.g., "low", "medium", "high") - optional
|
| 123 |
+
# }
|
| 124 |
+
|
| 125 |
+
# Returns:
|
| 126 |
+
# dict: {"success": True, "svg": "<svg>...</svg>", "width": w, "height": h}
|
| 127 |
+
# or {"success": False, "error": "..."}
|
| 128 |
+
# """
|
| 129 |
+
# print("\n[DEBUG] π§© Starting vectorize_image()...")
|
| 130 |
+
# print(f"[DEBUG] Input raster path: {input_raster_path}")
|
| 131 |
+
# print(f"[DEBUG] Output SVG path (if any): {output_svg_path}")
|
| 132 |
+
# print(f"[DEBUG] Options: {options}")
|
| 133 |
+
|
| 134 |
+
# options = options or {}
|
| 135 |
+
# threshold_value = options.get("threshold", 127)
|
| 136 |
+
# simplify_tolerance = options.get("simplify_tolerance", 1.5)
|
| 137 |
+
|
| 138 |
+
# print(f"[DEBUG] Using threshold={threshold_value}, simplify_tolerance={simplify_tolerance}")
|
| 139 |
+
|
| 140 |
+
# try:
|
| 141 |
+
# input_raster_path = Path(input_raster_path)
|
| 142 |
+
# if not input_raster_path.exists():
|
| 143 |
+
# print("[ERROR] β Input file not found.")
|
| 144 |
+
# return {"success": False, "error": "input file not found"}
|
| 145 |
+
|
| 146 |
+
# # ---- Load image ----
|
| 147 |
+
# img = cv2.imread(str(input_raster_path))
|
| 148 |
+
# if img is None:
|
| 149 |
+
# print("[ERROR] β Invalid or unreadable image.")
|
| 150 |
+
# return {"success": False, "error": "invalid or unreadable image"}
|
| 151 |
+
|
| 152 |
+
# print(f"[DEBUG] Image loaded successfully. Shape: {img.shape}")
|
| 153 |
+
|
| 154 |
+
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
| 155 |
+
# print("[DEBUG] Converted image to grayscale.")
|
| 156 |
+
|
| 157 |
+
# _, thresh = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_BINARY_INV)
|
| 158 |
+
# print("[DEBUG] Applied thresholding to generate binary image.")
|
| 159 |
+
|
| 160 |
+
# # ---- Find contours ----
|
| 161 |
+
# contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 162 |
+
# h, w = gray.shape[:2]
|
| 163 |
+
# print(f"[DEBUG] Found {len(contours)} contours. Image size: {w}x{h}")
|
| 164 |
+
|
| 165 |
+
# # ---- Build SVG in-memory ----
|
| 166 |
+
# svg_header = f'<svg xmlns="http://www.w3.org/2000/svg" width="{w}" height="{h}" viewBox="0 0 {w} {h}">'
|
| 167 |
+
# svg_paths = []
|
| 168 |
+
|
| 169 |
+
# for i, contour in enumerate(contours):
|
| 170 |
+
# contour = cv2.approxPolyDP(contour, simplify_tolerance, True)
|
| 171 |
+
# if contour.shape[0] < 2:
|
| 172 |
+
# print(f"[DEBUG] Skipping small contour #{i} (too few points).")
|
| 173 |
+
# continue
|
| 174 |
+
# path_data = "M " + " L ".join(f"{int(x)} {int(y)}" for x, y in contour[:, 0, :]) + " Z"
|
| 175 |
+
# svg_paths.append(f'<path d="{path_data}" fill="none" stroke="black" stroke-width="1"/>')
|
| 176 |
+
# print(f"[DEBUG] Added contour #{i} with {contour.shape[0]} points to SVG.")
|
| 177 |
+
|
| 178 |
+
# svg_content = svg_header + "".join(svg_paths) + "</svg>"
|
| 179 |
+
|
| 180 |
+
# print("[DEBUG] SVG generation completed successfully.")
|
| 181 |
+
# print(f"[DEBUG] Total paths in SVG: {len(svg_paths)}")
|
| 182 |
+
|
| 183 |
+
# # ---- Return result ----
|
| 184 |
+
# print("[SUCCESS] β
Vectorization completed.")
|
| 185 |
+
# return {
|
| 186 |
+
# "success": True,
|
| 187 |
+
# "svg": svg_content,
|
| 188 |
+
# "width": w,
|
| 189 |
+
# "height": h
|
| 190 |
+
# }
|
| 191 |
+
|
| 192 |
+
# except Exception as e:
|
| 193 |
+
# print(f"[ERROR] β Exception in vectorize_image(): {e}")
|
| 194 |
+
# return {"success": False, "error": str(e)}
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
from pathlib import Path
|
| 199 |
+
from typing import Dict, Optional
|
| 200 |
+
import cv2
|
| 201 |
+
import numpy as np
|
| 202 |
+
|
| 203 |
+
# def vectorize_image(input_raster_path: Path, output_svg_path: Path = None, options: Optional[Dict] = None) -> Dict:
|
| 204 |
+
# """
|
| 205 |
+
# Converts a raster (PNG/JPG) into a backgroundless SVG that represents visible shapes.
|
| 206 |
+
# Supports transparency, soft edges, and grayscale drawings.
|
| 207 |
+
# """
|
| 208 |
+
# print("\n[DEBUG] π§© Starting vectorize_image()...")
|
| 209 |
+
# input_raster_path = Path(input_raster_path)
|
| 210 |
+
# if not input_raster_path.exists():
|
| 211 |
+
# return {"success": False, "error": "input file not found"}
|
| 212 |
+
|
| 213 |
+
# options = options or {}
|
| 214 |
+
# threshold_value = options.get("threshold", 180)
|
| 215 |
+
# simplify_tolerance = options.get("simplify_tolerance", 2.0)
|
| 216 |
+
# stroke_color = options.get("stroke_color", "black")
|
| 217 |
+
|
| 218 |
+
# print(f"[DEBUG] Reading image: {input_raster_path}")
|
| 219 |
+
# img = cv2.imread(str(input_raster_path), cv2.IMREAD_UNCHANGED)
|
| 220 |
+
# if img is None:
|
| 221 |
+
# return {"success": False, "error": "cannot read image"}
|
| 222 |
+
|
| 223 |
+
# h, w = img.shape[:2]
|
| 224 |
+
# print(f"[DEBUG] Image shape: {w}x{h}")
|
| 225 |
+
|
| 226 |
+
# # ---- Handle alpha channel for transparency ----
|
| 227 |
+
# if img.shape[2] == 4:
|
| 228 |
+
# b, g, r, a = cv2.split(img)
|
| 229 |
+
# alpha_mask = a
|
| 230 |
+
# print("[DEBUG] Image has alpha channel (transparency detected).")
|
| 231 |
+
# else:
|
| 232 |
+
# b, g, r = cv2.split(img)
|
| 233 |
+
# alpha_mask = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
| 234 |
+
# print("[DEBUG] No alpha channel, using grayscale as mask.")
|
| 235 |
+
|
| 236 |
+
# # Invert alpha if background is white
|
| 237 |
+
# mean_val = np.mean(alpha_mask)
|
| 238 |
+
# if mean_val > 200:
|
| 239 |
+
# alpha_mask = 255 - alpha_mask
|
| 240 |
+
# print("[DEBUG] Inverted mask since background seemed white.")
|
| 241 |
+
|
| 242 |
+
# # ---- Threshold to get visible content ----
|
| 243 |
+
# _, binary = cv2.threshold(alpha_mask, threshold_value, 255, cv2.THRESH_BINARY)
|
| 244 |
+
# print("[DEBUG] Applied adaptive threshold.")
|
| 245 |
+
|
| 246 |
+
# # ---- Find contours ----
|
| 247 |
+
# contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 248 |
+
# print(f"[DEBUG] Found {len(contours)} contours in drawing.")
|
| 249 |
+
|
| 250 |
+
# svg_header = f'<svg xmlns="http://www.w3.org/2000/svg" width="{w}" height="{h}" viewBox="0 0 {w} {h}">'
|
| 251 |
+
# svg_paths = []
|
| 252 |
+
|
| 253 |
+
# for i, contour in enumerate(contours):
|
| 254 |
+
# contour = cv2.approxPolyDP(contour, simplify_tolerance, True)
|
| 255 |
+
# if contour.shape[0] < 3:
|
| 256 |
+
# continue
|
| 257 |
+
# path_data = "M " + " L ".join(f"{int(x)} {int(y)}" for x, y in contour[:, 0, :]) + " Z"
|
| 258 |
+
# svg_paths.append(f'<path d="{path_data}" fill="none" stroke="{stroke_color}" stroke-width="1"/>')
|
| 259 |
+
|
| 260 |
+
# svg_content = svg_header + "".join(svg_paths) + "</svg>"
|
| 261 |
+
# print(f"[DEBUG] Generated SVG with {len(svg_paths)} paths.")
|
| 262 |
+
|
| 263 |
+
# if len(svg_paths) == 0:
|
| 264 |
+
# print("[WARN] No visible content detected in the image.")
|
| 265 |
+
|
| 266 |
+
# return {
|
| 267 |
+
# "success": True,
|
| 268 |
+
# "svg": svg_content,
|
| 269 |
+
# "width": w,
|
| 270 |
+
# "height": h
|
| 271 |
+
# }
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
# def vectorize_image(input_raster_path: Path, output_svg_path: Path = None, options: Optional[Dict] = None) -> Dict:
|
| 275 |
+
# """
|
| 276 |
+
# Converts a raster (PNG/JPG) into a backgroundless SVG that represents visible shapes.
|
| 277 |
+
# Supports transparency, soft edges, grayscale drawings, and colored logos.
|
| 278 |
+
# """
|
| 279 |
+
# print("\n[DEBUG] π§© Starting vectorize_image()...")
|
| 280 |
+
# input_raster_path = Path(input_raster_path)
|
| 281 |
+
# if not input_raster_path.exists():
|
| 282 |
+
# return {"success": False, "error": "input file not found"}
|
| 283 |
+
|
| 284 |
+
# options = options or {}
|
| 285 |
+
# threshold_value = options.get("threshold", 180)
|
| 286 |
+
# simplify_tolerance = options.get("simplify_tolerance", 2.0)
|
| 287 |
+
# stroke_color = options.get("stroke_color", "black")
|
| 288 |
+
|
| 289 |
+
# print(f"[DEBUG] Reading image: {input_raster_path}")
|
| 290 |
+
# img = cv2.imread(str(input_raster_path), cv2.IMREAD_UNCHANGED)
|
| 291 |
+
# if img is None:
|
| 292 |
+
# return {"success": False, "error": "cannot read image"}
|
| 293 |
+
|
| 294 |
+
# h, w = img.shape[:2]
|
| 295 |
+
# print(f"[DEBUG] Image shape: {w}x{h}")
|
| 296 |
+
|
| 297 |
+
# # ---- Handle alpha channel for transparency ----
|
| 298 |
+
# if img.shape[2] == 4:
|
| 299 |
+
# b, g, r, a = cv2.split(img)
|
| 300 |
+
# alpha_mask = a
|
| 301 |
+
# print("[DEBUG] Image has alpha channel (transparency detected).")
|
| 302 |
+
# else:
|
| 303 |
+
# b, g, r = cv2.split(img)
|
| 304 |
+
# alpha_mask = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
| 305 |
+
# print("[DEBUG] No alpha channel, using grayscale as mask.")
|
| 306 |
+
|
| 307 |
+
# # Invert alpha if background is white
|
| 308 |
+
# mean_val = np.mean(alpha_mask)
|
| 309 |
+
# if mean_val > 200:
|
| 310 |
+
# alpha_mask = 255 - alpha_mask
|
| 311 |
+
# print("[DEBUG] Inverted mask since background seemed white.")
|
| 312 |
+
|
| 313 |
+
# # ---- Threshold to get visible content ----
|
| 314 |
+
# _, binary = cv2.threshold(alpha_mask, threshold_value, 255, cv2.THRESH_BINARY)
|
| 315 |
+
# print("[DEBUG] Applied adaptive threshold.")
|
| 316 |
+
|
| 317 |
+
# # ---- Find contours ----
|
| 318 |
+
# contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 319 |
+
# print(f"[DEBUG] Found {len(contours)} contours in drawing.")
|
| 320 |
+
|
| 321 |
+
# svg_header = f'<svg xmlns="http://www.w3.org/2000/svg" width="{w}" height="{h}" viewBox="0 0 {w} {h}">'
|
| 322 |
+
# svg_paths = []
|
| 323 |
+
|
| 324 |
+
# for i, contour in enumerate(contours):
|
| 325 |
+
# contour = cv2.approxPolyDP(contour, simplify_tolerance, True)
|
| 326 |
+
# if contour.shape[0] < 3:
|
| 327 |
+
# continue
|
| 328 |
+
|
| 329 |
+
# # ---- Sample mean color inside contour for fill ----
|
| 330 |
+
# mask = np.zeros((h, w), dtype=np.uint8)
|
| 331 |
+
# cv2.drawContours(mask, [contour], -1, 255, -1)
|
| 332 |
+
# mean_color = cv2.mean(img[:, :, :3], mask=mask)
|
| 333 |
+
# fill_color = f"rgb({int(mean_color[2])},{int(mean_color[1])},{int(mean_color[0])})" # RGB
|
| 334 |
+
|
| 335 |
+
# path_data = "M " + " L ".join(f"{int(x)} {int(y)}" for x, y in contour[:, 0, :]) + " Z"
|
| 336 |
+
# svg_paths.append(f'<path d="{path_data}" fill="{fill_color}" stroke="{stroke_color}" stroke-width="1"/>')
|
| 337 |
+
|
| 338 |
+
# svg_content = svg_header + "".join(svg_paths) + "</svg>"
|
| 339 |
+
# print(f"[DEBUG] Generated SVG with {len(svg_paths)} paths.")
|
| 340 |
+
|
| 341 |
+
# if len(svg_paths) == 0:
|
| 342 |
+
# print("[WARN] No visible content detected in the image.")
|
| 343 |
+
|
| 344 |
+
# # Save SVG if path provided
|
| 345 |
+
# if output_svg_path:
|
| 346 |
+
# Path(output_svg_path).write_text(svg_content)
|
| 347 |
+
# print(f"[DEBUG] SVG saved to: {output_svg_path}")
|
| 348 |
+
|
| 349 |
+
# return {
|
| 350 |
+
# "success": True,
|
| 351 |
+
# "svg": svg_content,
|
| 352 |
+
# "width": w,
|
| 353 |
+
# "height": h
|
| 354 |
+
# }
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
# def vectorize_image(input_raster_path: Path, output_svg_path: Path = None, options: Optional[Dict] = None) -> Dict:
|
| 358 |
+
# """
|
| 359 |
+
# Converts a raster (PNG/JPG) into an SVG that visually matches the original.
|
| 360 |
+
# Preserves colors, gradients, and transparency perfectly by embedding the image.
|
| 361 |
+
# """
|
| 362 |
+
# print("\n[DEBUG] π§© Starting vectorize_image() with full color preservation...")
|
| 363 |
+
# input_raster_path = Path(input_raster_path)
|
| 364 |
+
# if not input_raster_path.exists():
|
| 365 |
+
# return {"success": False, "error": "input file not found"}
|
| 366 |
+
|
| 367 |
+
# # Read image and encode as base64
|
| 368 |
+
# import base64
|
| 369 |
+
# import mimetypes
|
| 370 |
+
|
| 371 |
+
# mime_type, _ = mimetypes.guess_type(input_raster_path)
|
| 372 |
+
# if mime_type is None:
|
| 373 |
+
# mime_type = "image/png"
|
| 374 |
+
|
| 375 |
+
# with open(input_raster_path, "rb") as f:
|
| 376 |
+
# encoded = base64.b64encode(f.read()).decode("utf-8")
|
| 377 |
+
|
| 378 |
+
# # Read image size
|
| 379 |
+
# import cv2
|
| 380 |
+
# img = cv2.imread(str(input_raster_path), cv2.IMREAD_UNCHANGED)
|
| 381 |
+
# if img is None:
|
| 382 |
+
# return {"success": False, "error": "cannot read image"}
|
| 383 |
+
|
| 384 |
+
# h, w = img.shape[:2]
|
| 385 |
+
# print(f"[DEBUG] Image shape: {w}x{h}")
|
| 386 |
+
|
| 387 |
+
# # Construct SVG that embeds image as <image> tag
|
| 388 |
+
# svg_content = f'''<svg xmlns="http://www.w3.org/2000/svg"
|
| 389 |
+
# width="{w}" height="{h}" viewBox="0 0 {w} {h}" version="1.1">
|
| 390 |
+
# <image href="data:{mime_type};base64,{encoded}" width="{w}" height="{h}" />
|
| 391 |
+
# </svg>'''
|
| 392 |
+
|
| 393 |
+
# if output_svg_path:
|
| 394 |
+
# Path(output_svg_path).write_text(svg_content)
|
| 395 |
+
# print(f"[DEBUG] Saved SVG with embedded image to: {output_svg_path}")
|
| 396 |
+
|
| 397 |
+
# print("[DEBUG] β
Vectorization complete β colors and gradients preserved perfectly.")
|
| 398 |
+
# return {
|
| 399 |
+
# "success": True,
|
| 400 |
+
# "svg": svg_content,
|
| 401 |
+
# "width": w,
|
| 402 |
+
# "height": h
|
| 403 |
+
# }
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
# def vectorize_image(input_raster_path: Path, output_svg_path: Optional[Path] = None, options: Optional[Dict] = None) -> Dict:
|
| 408 |
+
# """
|
| 409 |
+
# Converts a raster image (PNG/JPG) into a visually perfect SVG.
|
| 410 |
+
# β
Preserves all colors, gradients, and transparency by embedding the raster as a base64 image.
|
| 411 |
+
# β‘ Output SVG is fully scalable and compatible with Manim or web rendering.
|
| 412 |
+
# """
|
| 413 |
+
# print("\n[DEBUG] π¨ Starting vectorize_image() β full visual fidelity mode")
|
| 414 |
+
|
| 415 |
+
# input_raster_path = Path(input_raster_path)
|
| 416 |
+
# if not input_raster_path.exists():
|
| 417 |
+
# return {"success": False, "error": "Input file not found"}
|
| 418 |
+
|
| 419 |
+
# # Detect MIME type
|
| 420 |
+
# mime_type, _ = mimetypes.guess_type(input_raster_path)
|
| 421 |
+
# if mime_type is None:
|
| 422 |
+
# mime_type = "image/png"
|
| 423 |
+
|
| 424 |
+
# # Read and encode image
|
| 425 |
+
# with open(input_raster_path, "rb") as f:
|
| 426 |
+
# encoded = base64.b64encode(f.read()).decode("utf-8")
|
| 427 |
+
|
| 428 |
+
# # Get image size
|
| 429 |
+
# img = cv2.imread(str(input_raster_path), cv2.IMREAD_UNCHANGED)
|
| 430 |
+
# if img is None:
|
| 431 |
+
# return {"success": False, "error": "Cannot read image"}
|
| 432 |
+
# h, w = img.shape[:2]
|
| 433 |
+
|
| 434 |
+
# print(f"[DEBUG] πΌοΈ Image dimensions: {w}x{h}")
|
| 435 |
+
|
| 436 |
+
# # High-quality embedded SVG
|
| 437 |
+
# svg_content = f"""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
| 438 |
+
# <svg xmlns="http://www.w3.org/2000/svg"
|
| 439 |
+
# width="{w}px" height="{h}px"
|
| 440 |
+
# viewBox="0 0 {w} {h}"
|
| 441 |
+
# version="1.1"
|
| 442 |
+
# preserveAspectRatio="xMidYMid meet">
|
| 443 |
+
# <image width="{w}" height="{h}"
|
| 444 |
+
# href="data:{mime_type};base64,{encoded}"
|
| 445 |
+
# style="image-rendering: optimizeQuality;"
|
| 446 |
+
# preserveAspectRatio="xMidYMid meet"/>
|
| 447 |
+
# </svg>
|
| 448 |
+
# """
|
| 449 |
+
|
| 450 |
+
# if output_svg_path:
|
| 451 |
+
# Path(output_svg_path).write_text(svg_content, encoding="utf-8")
|
| 452 |
+
# print(f"[DEBUG] πΎ Saved high-fidelity SVG β {output_svg_path}")
|
| 453 |
+
|
| 454 |
+
# print("[DEBUG] β
Vectorization complete β gradients, alpha, and sharp edges preserved perfectly.")
|
| 455 |
+
# return {
|
| 456 |
+
# "success": True,
|
| 457 |
+
# "svg": svg_content,
|
| 458 |
+
# "width": w,
|
| 459 |
+
# "height": h
|
| 460 |
+
# }
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
# def vectorize_image(input_raster_path: Path, output_svg_path: Optional[Path] = None, options: Optional[Dict] = None) -> Dict:
|
| 464 |
+
# """
|
| 465 |
+
# Converts a raster image (PNG/JPG/PNG with alpha) into a near visually perfect vector-based SVG.
|
| 466 |
+
# β
Preserves colors, gradients, and transparency as much as possible.
|
| 467 |
+
# β‘ Output SVG is fully scalable and compatible with Manim or web rendering.
|
| 468 |
+
# """
|
| 469 |
+
# import cv2, numpy as np, mimetypes
|
| 470 |
+
# from pathlib import Path
|
| 471 |
+
# from sklearn.cluster import KMeans
|
| 472 |
+
|
| 473 |
+
# print("\n[DEBUG] π¨ Starting vectorize_image() β high-fidelity gradient-aware mode")
|
| 474 |
+
|
| 475 |
+
# input_raster_path = Path(input_raster_path)
|
| 476 |
+
# if not input_raster_path.exists():
|
| 477 |
+
# return {"success": False, "error": "Input file not found"}
|
| 478 |
+
|
| 479 |
+
# mime_type, _ = mimetypes.guess_type(input_raster_path)
|
| 480 |
+
# if mime_type is None:
|
| 481 |
+
# mime_type = "image/png"
|
| 482 |
+
|
| 483 |
+
# # Read with alpha preserved
|
| 484 |
+
# img = cv2.imread(str(input_raster_path), cv2.IMREAD_UNCHANGED)
|
| 485 |
+
# if img is None:
|
| 486 |
+
# return {"success": False, "error": "Cannot read image"}
|
| 487 |
+
|
| 488 |
+
# if img.shape[2] == 4:
|
| 489 |
+
# bgr, alpha = img[:, :, :3], img[:, :, 3]
|
| 490 |
+
# else:
|
| 491 |
+
# bgr, alpha = img, np.full(img.shape[:2], 255, dtype=np.uint8)
|
| 492 |
+
|
| 493 |
+
# h, w = bgr.shape[:2]
|
| 494 |
+
# print(f"[DEBUG] πΌοΈ Image dimensions: {w}x{h}")
|
| 495 |
+
|
| 496 |
+
# # ---------------------------
|
| 497 |
+
# # Step 1: Smart color clustering (using KMeans)
|
| 498 |
+
# # ---------------------------
|
| 499 |
+
# n_colors = (options or {}).get("colors", 24)
|
| 500 |
+
# print(f"[DEBUG] π¨ Using {n_colors} colors for smoother accuracy")
|
| 501 |
+
|
| 502 |
+
# data = bgr.reshape((-1, 3))
|
| 503 |
+
# kmeans = KMeans(n_clusters=n_colors, n_init=4, random_state=0).fit(data)
|
| 504 |
+
# labels = kmeans.labels_.reshape(h, w)
|
| 505 |
+
# centers = np.uint8(kmeans.cluster_centers_)
|
| 506 |
+
|
| 507 |
+
# # ---------------------------
|
| 508 |
+
# # Step 2: Build SVG Paths
|
| 509 |
+
# # ---------------------------
|
| 510 |
+
# print("[DEBUG] βοΈ Extracting color regions...")
|
| 511 |
+
|
| 512 |
+
# paths = []
|
| 513 |
+
# for idx, color in enumerate(centers):
|
| 514 |
+
# mask = (labels == idx).astype(np.uint8) * 255
|
| 515 |
+
# contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 516 |
+
# hex_color = "#{:02x}{:02x}{:02x}".format(*color)
|
| 517 |
+
# for cnt in contours:
|
| 518 |
+
# if len(cnt) > 3:
|
| 519 |
+
# d = "M " + " L ".join(f"{p[0][0]},{p[0][1]}" for p in cnt) + " Z"
|
| 520 |
+
# # Apply average alpha for this region
|
| 521 |
+
# region_alpha = np.mean(alpha[mask == 255]) / 255.0
|
| 522 |
+
# if region_alpha < 0.05: # fully transparent, skip
|
| 523 |
+
# continue
|
| 524 |
+
# fill_opacity = round(region_alpha, 3)
|
| 525 |
+
# paths.append((hex_color, fill_opacity, d))
|
| 526 |
+
|
| 527 |
+
# if not paths:
|
| 528 |
+
# return {"success": False, "error": "No visible regions found (maybe full transparency)"}
|
| 529 |
+
|
| 530 |
+
# print(f"[DEBUG] β
{len(paths)} color regions traced")
|
| 531 |
+
|
| 532 |
+
# # ---------------------------
|
| 533 |
+
# # Step 3: Gradient approximation
|
| 534 |
+
# # ---------------------------
|
| 535 |
+
# print("[DEBUG] π Estimating global color gradients...")
|
| 536 |
+
# dominant1, dominant2 = centers[0], centers[-1]
|
| 537 |
+
# grad_id = "grad_main"
|
| 538 |
+
# grad_def = f"""
|
| 539 |
+
# <defs>
|
| 540 |
+
# <linearGradient id="{grad_id}" x1="0%" y1="0%" x2="100%" y2="100%">
|
| 541 |
+
# <stop offset="0%" stop-color="#{dominant1[0]:02x}{dominant1[1]:02x}{dominant1[2]:02x}" />
|
| 542 |
+
# <stop offset="100%" stop-color="#{dominant2[0]:02x}{dominant2[1]:02x}{dominant2[2]:02x}" />
|
| 543 |
+
# </linearGradient>
|
| 544 |
+
# </defs>
|
| 545 |
+
# """
|
| 546 |
+
|
| 547 |
+
# # ---------------------------
|
| 548 |
+
# # Step 4: Assemble SVG
|
| 549 |
+
# # ---------------------------
|
| 550 |
+
# svg_content = [
|
| 551 |
+
# '<?xml version="1.0" encoding="UTF-8" standalone="no"?>',
|
| 552 |
+
# f'<svg xmlns="http://www.w3.org/2000/svg" width="{w}px" height="{h}px" viewBox="0 0 {w} {h}" version="1.1">'
|
| 553 |
+
# ]
|
| 554 |
+
# svg_content.append(grad_def)
|
| 555 |
+
# for color, opacity, d in paths:
|
| 556 |
+
# svg_content.append(f' <path d="{d}" fill="{color}" fill-opacity="{opacity}" stroke="none"/>')
|
| 557 |
+
# svg_content.append('</svg>')
|
| 558 |
+
|
| 559 |
+
# svg_content = "\n".join(svg_content)
|
| 560 |
+
|
| 561 |
+
# # Save SVG
|
| 562 |
+
# if output_svg_path:
|
| 563 |
+
# Path(output_svg_path).write_text(svg_content, encoding="utf-8")
|
| 564 |
+
# print(f"[DEBUG] πΎ Saved vectorized SVG β {output_svg_path}")
|
| 565 |
+
|
| 566 |
+
# print("[DEBUG] β
Vectorization complete β transparency, color, and gradient preserved.")
|
| 567 |
+
# return {
|
| 568 |
+
# "success": True,
|
| 569 |
+
# "svg": svg_content,
|
| 570 |
+
# "width": w,
|
| 571 |
+
# "height": h
|
| 572 |
+
# }
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
# def vectorize_image(input_raster_path: Path, options: Optional[Dict] = None) -> Dict:
|
| 578 |
+
# """
|
| 579 |
+
# Converts a raster image (PNG/JPG/WEBP) into a high-quality vector SVG using PyVTracer.
|
| 580 |
+
# β
Uses only PyVTracer
|
| 581 |
+
# β
Returns SVG directly in memory
|
| 582 |
+
# β
Fixes all int/float/bool/string conversion issues
|
| 583 |
+
# """
|
| 584 |
+
# print("\n[DEBUG] π¨ Starting vectorize_image() β final type-safe configuration")
|
| 585 |
+
|
| 586 |
+
# input_raster_path = Path(input_raster_path)
|
| 587 |
+
# if not input_raster_path.exists():
|
| 588 |
+
# return {"success": False, "error": "Input file not found"}
|
| 589 |
+
|
| 590 |
+
# opts = options or {}
|
| 591 |
+
|
| 592 |
+
# try:
|
| 593 |
+
# with Image.open(input_raster_path) as img:
|
| 594 |
+
# w, h = img.size
|
| 595 |
+
|
| 596 |
+
# tracer = pyvtracer.Vtracer()
|
| 597 |
+
|
| 598 |
+
# # β
Correct, final parameter typing
|
| 599 |
+
# tracer.input_path = str(input_raster_path)
|
| 600 |
+
# tracer.color_mode = str(opts.get("color_mode", "color"))
|
| 601 |
+
# tracer.filter_speckle = int(opts.get("filter_speckle", 2))
|
| 602 |
+
# tracer.corner_threshold = int(opts.get("corner_threshold", 60))
|
| 603 |
+
# tracer.color_precision = int(opts.get("color_precision", 10))
|
| 604 |
+
# tracer.layer_difference = int(opts.get("layer_difference", 16))
|
| 605 |
+
# tracer.path_precision = int(opts.get("path_precision", 2))
|
| 606 |
+
# tracer.length_threshold = int(opts.get("length_threshold", 4))
|
| 607 |
+
# tracer.splice_threshold = int(opts.get("splice_threshold", 45))
|
| 608 |
+
# tracer.hierarchical = "true" if opts.get("hierarchical", True) else "false"
|
| 609 |
+
# tracer.max_iterations = int(opts.get("max_iterations", 10))
|
| 610 |
+
# tracer.path_simplify_mode = int(opts.get("path_simplify_mode", 0))
|
| 611 |
+
|
| 612 |
+
# print("[DEBUG] β
All parameters properly typed (int/str).")
|
| 613 |
+
# print(f"[DEBUG] π Vectorizing: {input_raster_path.name}")
|
| 614 |
+
|
| 615 |
+
# # π Get SVG string directly
|
| 616 |
+
# if hasattr(tracer, "to_svg_string"):
|
| 617 |
+
# svg_content = tracer.to_svg_string()
|
| 618 |
+
# else:
|
| 619 |
+
# tmp_svg = Path(tempfile.gettempdir()) / f"{input_raster_path.stem}_vtrace.svg"
|
| 620 |
+
# tracer.output_path = str(tmp_svg)
|
| 621 |
+
# tracer.to_svg()
|
| 622 |
+
# svg_content = tmp_svg.read_text(encoding="utf-8")
|
| 623 |
+
# tmp_svg.unlink(missing_ok=True)
|
| 624 |
+
|
| 625 |
+
# print("[DEBUG] β
Vectorization complete β SVG generated in memory.")
|
| 626 |
+
# return {
|
| 627 |
+
# "success": True,
|
| 628 |
+
# "svg": svg_content,
|
| 629 |
+
# "width": w,
|
| 630 |
+
# "height": h
|
| 631 |
+
# }
|
| 632 |
+
|
| 633 |
+
# except Exception as e:
|
| 634 |
+
# print(f"β [ERROR] PyVTracer failed: {e}")
|
| 635 |
+
# return {"success": False, "error": f"PyVTracer failed: {e}"}
|
| 636 |
+
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
# from pathlib import Path
|
| 640 |
+
# from typing import Optional, Dict, Any
|
| 641 |
+
# from PIL import Image
|
| 642 |
+
# import tempfile
|
| 643 |
+
|
| 644 |
+
# # prefer the official vtracer binding from PyPI
|
| 645 |
+
# import vtracer
|
| 646 |
+
|
| 647 |
+
# # mapping and sane defaults (names & types follow vtracer docs)
|
| 648 |
+
# DEFAULTS: Dict[str, Any] = {
|
| 649 |
+
# "colormode": "color", # "color" or "binary"
|
| 650 |
+
# "hierarchical": "stacked", # "stacked" or "cutout"
|
| 651 |
+
# "mode": "spline", # "spline", "polygon", or "none"
|
| 652 |
+
# "filter_speckle": 2, # int
|
| 653 |
+
# "color_precision": 14, # int
|
| 654 |
+
# "layer_difference": 6, # int (gradient step)
|
| 655 |
+
# "corner_threshold": 50, # int
|
| 656 |
+
# "length_threshold": 3.5, # float (in [3.5, 10])
|
| 657 |
+
# "max_iterations": 10, # int
|
| 658 |
+
# "splice_threshold": 40, # int
|
| 659 |
+
# "path_precision": 10 # int (path digits/precision)
|
| 660 |
+
# }
|
| 661 |
+
|
| 662 |
+
# def _normalize_options(opts: Optional[Dict]) -> Dict:
|
| 663 |
+
# """
|
| 664 |
+
# Keep only acceptable keys with correct types according to official docs.
|
| 665 |
+
# """
|
| 666 |
+
# opts = opts or {}
|
| 667 |
+
# out: Dict[str, Any] = {}
|
| 668 |
+
|
| 669 |
+
# # strings (colormode, hierarchical, mode)
|
| 670 |
+
# out["colormode"] = str(opts.get("colormode", DEFAULTS["colormode"]))
|
| 671 |
+
# out["hierarchical"] = str(opts.get("hierarchical", DEFAULTS["hierarchical"]))
|
| 672 |
+
# out["mode"] = str(opts.get("mode", DEFAULTS["mode"]))
|
| 673 |
+
|
| 674 |
+
# # integer parameters
|
| 675 |
+
# for k in ("filter_speckle", "color_precision", "layer_difference",
|
| 676 |
+
# "corner_threshold", "max_iterations", "splice_threshold",
|
| 677 |
+
# "path_precision"):
|
| 678 |
+
# out[k] = int(opts.get(k, DEFAULTS[k]))
|
| 679 |
+
|
| 680 |
+
# # float parameter
|
| 681 |
+
# out["length_threshold"] = float(opts.get("length_threshold", DEFAULTS["length_threshold"]))
|
| 682 |
+
|
| 683 |
+
# return out
|
| 684 |
+
|
| 685 |
+
# def vectorize_image(input_raster_path: Path, options: Optional[Dict] = None) -> Dict:
|
| 686 |
+
# """
|
| 687 |
+
# Vectorize using the official vtracer binding.
|
| 688 |
+
|
| 689 |
+
# Returns:
|
| 690 |
+
# {
|
| 691 |
+
# "success": True,
|
| 692 |
+
# "svg": "<svg ...>",
|
| 693 |
+
# "width": w,
|
| 694 |
+
# "height": h
|
| 695 |
+
# }
|
| 696 |
+
# On error:
|
| 697 |
+
# {"success": False, "error": "message"}
|
| 698 |
+
# """
|
| 699 |
+
# input_raster_path = Path(input_raster_path)
|
| 700 |
+
# if not input_raster_path.exists():
|
| 701 |
+
# return {"success": False, "error": "Input file not found"}
|
| 702 |
+
|
| 703 |
+
# opts = _normalize_options(options)
|
| 704 |
+
|
| 705 |
+
# try:
|
| 706 |
+
# # read width/height
|
| 707 |
+
# with Image.open(input_raster_path) as img:
|
| 708 |
+
# w, h = img.size
|
| 709 |
+
# # convert to bytes for the raw API if needed
|
| 710 |
+
# img_bytes_io = None
|
| 711 |
+
# try:
|
| 712 |
+
# # ensure a common in-memory format, keep original mode if possible
|
| 713 |
+
# img_format = img.format or "PNG"
|
| 714 |
+
# img_bytes_io = tempfile.SpooledTemporaryFile() # small-memory friendly
|
| 715 |
+
# img.save(img_bytes_io, format=img_format)
|
| 716 |
+
# img_bytes_io.seek(0)
|
| 717 |
+
# raw_bytes = img_bytes_io.read()
|
| 718 |
+
# finally:
|
| 719 |
+
# if img_bytes_io is not None:
|
| 720 |
+
# img_bytes_io.close()
|
| 721 |
+
|
| 722 |
+
# # Prefer in-memory API: convert_raw_image_to_svg(bytes, img_format=...)
|
| 723 |
+
# if hasattr(vtracer, "convert_raw_image_to_svg"):
|
| 724 |
+
# # vtracer expects bytes and an image format string like 'png' or 'jpg'
|
| 725 |
+
# img_format_lower = (Image.open(input_raster_path).format or "PNG").lower()
|
| 726 |
+
# svg_str = vtracer.convert_raw_image_to_svg(raw_bytes, img_format=img_format_lower, **opts)
|
| 727 |
+
# elif hasattr(vtracer, "convert_pixels_to_svg"):
|
| 728 |
+
# # alternative: convert pixels to svg β slower for large images
|
| 729 |
+
# img = Image.open(input_raster_path).convert("RGBA")
|
| 730 |
+
# pixels = list(img.getdata())
|
| 731 |
+
# svg_str = vtracer.convert_pixels_to_svg(pixels, img.width, img.height, **opts)
|
| 732 |
+
# else:
|
| 733 |
+
# # last resort: call convert_image_to_svg_py which writes to file; read & delete the file
|
| 734 |
+
# tmp_svg = Path(tempfile.gettempdir()) / f"{input_raster_path.stem}_vtrace_temp.svg"
|
| 735 |
+
# # convert_image_to_svg_py(inp, out, **kwargs) - writes file
|
| 736 |
+
# vtracer.convert_image_to_svg_py(str(input_raster_path), str(tmp_svg), **opts)
|
| 737 |
+
# svg_str = tmp_svg.read_text(encoding="utf-8")
|
| 738 |
+
# try:
|
| 739 |
+
# tmp_svg.unlink()
|
| 740 |
+
# except Exception:
|
| 741 |
+
# pass
|
| 742 |
+
|
| 743 |
+
# return {"success": True, "svg": svg_str, "width": w, "height": h}
|
| 744 |
+
|
| 745 |
+
# except Exception as e:
|
| 746 |
+
# # return full error message so you can debug inside logs
|
| 747 |
+
# return {"success": False, "error": f"VTracer failed: {e}"}
|
| 748 |
+
|
| 749 |
+
|
| 750 |
+
|
| 751 |
+
from pathlib import Path
|
| 752 |
+
from typing import Optional, Dict, Any
|
| 753 |
+
from PIL import Image, ImageFilter
|
| 754 |
+
import tempfile
|
| 755 |
+
import vtracer
|
| 756 |
+
|
| 757 |
+
# DEFAULTS: Dict[str, Any] = {
|
| 758 |
+
# "colormode": "color",
|
| 759 |
+
# "hierarchical": "stacked",
|
| 760 |
+
# "mode": "spline",
|
| 761 |
+
# "filter_speckle": 2,
|
| 762 |
+
# "color_precision": 14,
|
| 763 |
+
# "layer_difference": 2,
|
| 764 |
+
# "corner_threshold": 50,
|
| 765 |
+
# "length_threshold": 3.5,
|
| 766 |
+
# "max_iterations": 10,
|
| 767 |
+
# "splice_threshold": 40,
|
| 768 |
+
# "path_precision": 10
|
| 769 |
+
# }
|
| 770 |
+
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
# pro
|
| 774 |
+
# DEFAULTS_PAID: Dict[str, Any] = {
|
| 775 |
+
# "colormode": "color",
|
| 776 |
+
# "hierarchical": "stacked",
|
| 777 |
+
# "mode": "spline", # sharpest edges and curves
|
| 778 |
+
# "filter_speckle": 0, # keep all tiny speckles
|
| 779 |
+
# "color_precision": 256, # extremely high color accuracy
|
| 780 |
+
# "layer_difference": 8, # very fine gradient layers
|
| 781 |
+
# "corner_threshold": 100, # preserve almost all corners
|
| 782 |
+
# "length_threshold": 0.1, # keep even the tiniest paths
|
| 783 |
+
# "max_iterations": 500, # thorough path optimization
|
| 784 |
+
# "splice_threshold": 100, # merge paths very carefully
|
| 785 |
+
# "path_precision": 128 # ultra-smooth curves and clean edges
|
| 786 |
+
# }
|
| 787 |
+
|
| 788 |
+
DEFAULTS: Dict[str, Any] = {
|
| 789 |
+
"colormode": "color",
|
| 790 |
+
"hierarchical": "stacked",
|
| 791 |
+
"mode": "curve", # sharper edges
|
| 792 |
+
"filter_speckle": 1, # minimal removal
|
| 793 |
+
"color_precision": 20, # high color accuracy
|
| 794 |
+
"layer_difference": 10, # finer gradient layers
|
| 795 |
+
"corner_threshold": 35, # preserve corners
|
| 796 |
+
"length_threshold": 1.0, # more detail
|
| 797 |
+
"max_iterations": 10,
|
| 798 |
+
"splice_threshold": 40,
|
| 799 |
+
"path_precision": 16 # smoother curves and clean edges
|
| 800 |
+
}
|
| 801 |
+
def _normalize_options(opts: Optional[Dict]) -> Dict[str, Any]:
|
| 802 |
+
opts = opts or {}
|
| 803 |
+
out: Dict[str, Any] = {}
|
| 804 |
+
out["colormode"] = str(opts.get("colormode", DEFAULTS["colormode"]))
|
| 805 |
+
out["hierarchical"] = str(opts.get("hierarchical", DEFAULTS["hierarchical"]))
|
| 806 |
+
out["mode"] = str(opts.get("mode", DEFAULTS["mode"]))
|
| 807 |
+
for k in ("filter_speckle", "color_precision", "layer_difference",
|
| 808 |
+
"corner_threshold", "max_iterations", "splice_threshold",
|
| 809 |
+
"path_precision"):
|
| 810 |
+
out[k] = int(opts.get(k, DEFAULTS[k]))
|
| 811 |
+
out["length_threshold"] = float(opts.get("length_threshold", DEFAULTS["length_threshold"]))
|
| 812 |
+
return out
|
| 813 |
+
|
| 814 |
+
def vectorize_image(input_raster_path: Path, options: Optional[Dict] = None,
|
| 815 |
+
preprocess: bool = False) -> Dict[str, Any]:
|
| 816 |
+
input_raster_path = Path(input_raster_path)
|
| 817 |
+
if not input_raster_path.exists():
|
| 818 |
+
return {"success": False, "error": "Input file not found"}
|
| 819 |
+
|
| 820 |
+
opts = _normalize_options(options)
|
| 821 |
+
|
| 822 |
+
try:
|
| 823 |
+
with Image.open(input_raster_path) as img:
|
| 824 |
+
# preserve original mode
|
| 825 |
+
orig_mode = img.mode
|
| 826 |
+
w, h = img.size
|
| 827 |
+
|
| 828 |
+
if preprocess:
|
| 829 |
+
# optional: add slight blur or noise to reduce banding in gradients
|
| 830 |
+
img = img.convert("RGB")
|
| 831 |
+
img = img.filter(ImageFilter.GaussianBlur(radius=0.8))
|
| 832 |
+
# you could add noise here if needed
|
| 833 |
+
|
| 834 |
+
img_format = img.format or "PNG"
|
| 835 |
+
bytes_io = tempfile.SpooledTemporaryFile()
|
| 836 |
+
img.save(bytes_io, format=img_format)
|
| 837 |
+
bytes_io.seek(0)
|
| 838 |
+
raw_bytes = bytes_io.read()
|
| 839 |
+
bytes_io.close()
|
| 840 |
+
|
| 841 |
+
# Use in-memory API if available
|
| 842 |
+
if hasattr(vtracer, "convert_raw_image_to_svg"):
|
| 843 |
+
format_lower = img_format.lower()
|
| 844 |
+
svg_str = vtracer.convert_raw_image_to_svg(raw_bytes, img_format=format_lower, **opts)
|
| 845 |
+
elif hasattr(vtracer, "convert_pixels_to_svg"):
|
| 846 |
+
with Image.open(input_raster_path) as img2:
|
| 847 |
+
img2 = img2.convert("RGBA")
|
| 848 |
+
pixels = list(img2.getdata())
|
| 849 |
+
svg_str = vtracer.convert_pixels_to_svg(pixels, img2.width, img2.height, **opts)
|
| 850 |
+
else:
|
| 851 |
+
tmp_svg = Path(tempfile.gettempdir()) / f"{input_raster_path.stem}_vtrace_temp.svg"
|
| 852 |
+
vtracer.convert_image_to_svg_py(str(input_raster_path), str(tmp_svg), **opts)
|
| 853 |
+
svg_str = tmp_svg.read_text(encoding="utf-8")
|
| 854 |
+
try:
|
| 855 |
+
tmp_svg.unlink()
|
| 856 |
+
except Exception:
|
| 857 |
+
pass
|
| 858 |
+
|
| 859 |
+
return {
|
| 860 |
+
"success": True,
|
| 861 |
+
"svg": svg_str,
|
| 862 |
+
"width": w,
|
| 863 |
+
"height": h,
|
| 864 |
+
"mode": orig_mode
|
| 865 |
+
}
|
| 866 |
+
|
| 867 |
+
except Exception as e:
|
| 868 |
+
return {"success": False, "error": f"VTracer failed: {e}"}
|
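A minimal usage sketch for the function above (illustrative, not part of the uploaded files): it assumes the module is importable as core.vectorizer and that the vtracer and Pillow packages are installed; the file names are placeholders.

# usage sketch (illustrative only)
from pathlib import Path
from core.vectorizer import vectorize_image

result = vectorize_image(
    Path("logo.png"),                                    # placeholder input raster
    options={"mode": "polygon", "color_precision": 8},   # partial overrides merged onto DEFAULTS
    preprocess=True,                                     # slight blur to reduce gradient banding
)
if result["success"]:
    Path("logo.svg").write_text(result["svg"], encoding="utf-8")
    print(f'traced {result["width"]}x{result["height"]} ({result["mode"]}) image to SVG')
else:
    print("vectorization failed:", result["error"])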
requirements.txt
ADDED
|
File without changes
|
task_queue.py
ADDED
|
@@ -0,0 +1,399 @@
# queue.py
import os
import time
import pickle
import asyncio
import nest_asyncio
from pathlib import Path
from enum import Enum
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Optional

# allow nested event loops in some environments
nest_asyncio.apply()

# --------------------------------------------------
# Logging setup
# --------------------------------------------------
import logging

logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s] [%(levelname)s] %(message)s",
    datefmt="%H:%M:%S",
)
logger = logging.getLogger("queue_system")


class TaskStatus(Enum):
    QUEUED = "queued"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"


class TaskQueue:
    """
    File-backed persistent queue that stores tasks as metadata dicts.
    Uses ThreadPoolExecutor to run the CPU-bound pipeline in worker threads.
    """
    def __init__(self, base_dir: Path | str, max_workers: int = 2):
        self.base_dir = Path(base_dir)
        self.base_dir.mkdir(parents=True, exist_ok=True)
        self._queue_file = self.base_dir / "queue.pkl"
        self._state_file = self.base_dir / "state.pkl"

        self._dq = deque()      # pending task metadata dicts
        self._tasks = {}        # task_id -> metadata
        self._statuses = {}     # task_id -> TaskStatus
        self._lock = asyncio.Lock()

        self._executor = ThreadPoolExecutor(max_workers=max_workers)
        self._worker_task: Optional[asyncio.Task] = None
        self._shutdown = False
        self._processor: Optional[Callable] = None  # sync function to run per task

        logger.info(f"TaskQueue initialized | base_dir={self.base_dir} | workers={max_workers}")
        self._load_state()

    # ------------------------
    # persistence
    # ------------------------
    def _save_state(self):
        try:
            tmp = self._state_file.with_suffix(".tmp")
            with tmp.open("wb") as f:
                pickle.dump({
                    "queue": list(self._dq),
                    "tasks": self._tasks,
                    "statuses": self._statuses,
                }, f)
            tmp.replace(self._state_file)
            logger.debug(f"Queue state saved | queued={len(self._dq)} tasks")
        except Exception as e:
            logger.warning(f"Failed to save state: {e}")

    def _load_state(self):
        if self._state_file.exists():
            try:
                with self._state_file.open("rb") as f:
                    data = pickle.load(f)
                for item in data.get("queue", []):
                    self._dq.append(item)
                self._tasks.update(data.get("tasks", {}))
                self._statuses.update(data.get("statuses", {}))
                logger.info(f"Loaded previous queue state | tasks={len(self._tasks)}")
            except Exception as e:
                logger.error(f"Failed to load state file, starting fresh: {e}")
                self._dq = deque()
                self._tasks = {}
                self._statuses = {}

    # ------------------------
    # public API
    # ------------------------
    def enqueue(self, task_meta: dict):
        task_id = task_meta.get("task_id")
        if not task_id:
            raise ValueError("task_meta must contain 'task_id'")
        self._dq.append(task_meta)
        self._tasks[task_id] = task_meta
        self._statuses[task_id] = TaskStatus.QUEUED
        self._save_state()
        logger.info(f"Task enqueued | id={task_id} | total_queued={len(self._dq)}")

    def get_status(self, task_id: str):
        st = self._statuses.get(task_id)
        logger.debug(f"get_status({task_id}) -> {st}")
        return st

    def get_task_info(self, task_id: str):
        info = self._tasks.get(task_id)
        logger.debug(f"get_task_info({task_id}) -> {'found' if info else 'not found'}")
        return info

    def remove_task(self, task_id: str):
        self._tasks.pop(task_id, None)
        self._statuses.pop(task_id, None)
        self._save_state()
        logger.info(f"Task removed from system | id={task_id}")

    # ------------------------
    # lifecycle
    # ------------------------
    async def start(self, processor: Callable):
        if self._worker_task:
            logger.warning("Queue worker already running, ignoring start request.")
            return
        self._processor = processor
        self._shutdown = False
        loop = asyncio.get_event_loop()
        self._worker_task = loop.create_task(self._worker_loop())
        logger.info("Background worker started successfully.")

    async def stop(self):
        logger.info("Stopping background worker...")
        self._shutdown = True
        if self._worker_task:
            await self._worker_task
            self._worker_task = None
        self._executor.shutdown(wait=True)
        self._save_state()
        logger.info("Worker stopped and executor shut down cleanly.")

    # ------------------------
    # worker loop
    # ------------------------
    async def _worker_loop(self):
        logger.info("Worker loop started")
        while not self._shutdown:
            if not self._dq:
                await asyncio.sleep(0.5)
                continue

            task_meta = self._dq.popleft()
            task_id = task_meta.get("task_id")
            logger.info(f"Processing task {task_id}")

            try:
                self._statuses[task_id] = TaskStatus.RUNNING
                self._save_state()

                loop = asyncio.get_event_loop()
                logger.debug(f"Running processor for task {task_id}")
                future = loop.run_in_executor(
                    self._executor,
                    self._run_processor_safe,
                    task_meta
                )
                result = await future
                logger.debug(f"Processor result: {result}")

                if isinstance(result, dict) and result.get("success"):
                    self._statuses[task_id] = TaskStatus.COMPLETED
                    self._tasks[task_id].update({
                        "output_path": result.get("output_path"),
                        "output_bytes": result.get("output_bytes")
                    })
                    logger.info(f"Task {task_id} completed successfully")
                else:
                    self._statuses[task_id] = TaskStatus.FAILED
                    logger.error(f"Task {task_id} failed: {result}")

                self._save_state()
            except Exception:
                logger.exception(f"Error processing task {task_id}")
                self._statuses[task_id] = TaskStatus.FAILED
                self._save_state()

            await asyncio.sleep(0.1)

        logger.info("Worker loop exiting cleanly.")

    def _run_processor_safe(self, task_meta: dict) -> dict:
        try:
            if not self._processor:
                logger.error("No processor configured, cannot run task.")
                return {"success": False, "error": "No processor configured"}

            task_id = task_meta.get("task_id")
            logger.debug(f"Running processor for task {task_id}...")
            result = self._processor(task_meta)
            logger.debug(f"Processor result for {task_id}: {result}")
            return result or {"success": False}
        except Exception as e:
            logger.exception(f"Processor crashed for task {task_meta.get('task_id')}: {e}")
            return {"success": False, "error": str(e)}
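A minimal wiring sketch for TaskQueue (illustrative, not part of the uploaded files): process_task, the ./queue_data directory, and the input_path key are placeholders; it only assumes the public API defined above (enqueue, start, stop, get_status).

# queue usage sketch (illustrative only)
import asyncio
import uuid

from task_queue import TaskQueue, TaskStatus

def process_task(task_meta: dict) -> dict:
    # Runs inside the ThreadPoolExecutor; returns the dict shape that
    # _worker_loop checks: "success" plus optional output fields.
    output_path = f"/tmp/{task_meta['task_id']}.svg"
    # ... run the actual CPU-bound pipeline here ...
    return {"success": True, "output_path": output_path, "output_bytes": None}

async def main():
    queue = TaskQueue("./queue_data", max_workers=2)
    await queue.start(process_task)

    task_id = str(uuid.uuid4())
    queue.enqueue({"task_id": task_id, "input_path": "logo.png"})  # input_path is a placeholder key

    # poll until the worker marks the task finished
    while queue.get_status(task_id) in (TaskStatus.QUEUED, TaskStatus.RUNNING):
        await asyncio.sleep(0.5)

    print(task_id, queue.get_status(task_id))
    await queue.stop()

asyncio.run(main())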