#!/usr/bin/env bash
set -euo pipefail

# Make sure logs directory exists with proper permissions
mkdir -p /app/logs 2>/dev/null || true

cd /app

echo "===== Container Debug Info ====="
id
ls -la /app
ls -la /app/logs 2>/dev/null || echo "Logs directory not accessible"
echo "=============================="
# 1) Check if the spaCy model is already installed before trying to download it
echo "Checking spaCy model..."
if python -c "import spacy; spacy.load('en_core_web_sm')" 2>/dev/null; then
  echo "spaCy model already installed!"
else
  echo "Attempting to install spaCy model..."
  # Try installing to a location with proper permissions
  python -m spacy download en_core_web_sm --user || echo "spaCy model installation skipped - will use fallback methods"
fi
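# Re-check after the install attempt so the log records what the app will
# actually see; per the message above, the app is expected to fall back if
# the model is missing
python -c "import spacy; spacy.load('en_core_web_sm')" >/dev/null 2>&1 \
  && echo "spaCy model available" \
  || echo "spaCy model unavailable - relying on fallback methods"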
# 2) Launch the MCP servers as a long-running background process
echo "Starting MCP servers…"
cd /app/backend
bash start_mcp_servers.sh &
MCP_PID=$!
cd /app
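# Sanity check: give the launcher a moment, then probe the PID with kill -0
# (which sends no signal), so an early crash shows up in the logs instead of
# surfacing later as mysterious MCP failures
sleep 2
if ! kill -0 "$MCP_PID" 2>/dev/null; then
  echo "Warning: MCP server launcher exited early" >&2
fi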
# 3) Ensure the LLM model is present
# MODEL_REPO and MODEL_FILE are expected to be set in the environment
# (with set -u the script aborts below if they are missing)
LLM_DIR="/app/backend/pretrained_models/llm"
mkdir -p "${LLM_DIR}" || echo "Warning: Could not create LLM_DIR"
if [ ! -f "${LLM_DIR}/${MODEL_FILE}" ]; then
  echo "Downloading LLM model ${MODEL_FILE}…"
  # The heredoc delimiter is unquoted on purpose: ${MODEL_REPO}, ${MODEL_FILE}
  # and ${LLM_DIR} are expanded by the shell before Python runs
  python - <<PYCODE
from huggingface_hub import login, hf_hub_download
import os

# Get token from environment variable
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    print("Using provided HF_TOKEN for authentication")
    login(token=hf_token)
else:
    print("Warning: No HF_TOKEN provided, attempting anonymous download")

hf_hub_download(repo_id="${MODEL_REPO}",
                filename="${MODEL_FILE}",
                local_dir="${LLM_DIR}")
PYCODE
fi
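# Fail fast if the model file is still missing; otherwise llama_cpp.server
# would only fail later with a less obvious error
if [ ! -f "${LLM_DIR}/${MODEL_FILE}" ]; then
  echo "ERROR: ${LLM_DIR}/${MODEL_FILE} not found after download attempt" >&2
  exit 1
fi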
# 4) Launch the llama-cpp-python server
echo "Starting LLM server on port 8000…"
python -m llama_cpp.server \
  --model "${LLM_DIR}/${MODEL_FILE}" \
  --n_gpu_layers -1 \
  --n_ctx 10000 \
  --n_threads 12 \
  --n_batch 1024 \
  --host 0.0.0.0 --port 8000 &
LLM_PID=$!
# Wait a bit to ensure services start properly
sleep 10
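# A fixed sleep can race on slow cold starts. If curl is present in the image
# (an assumption), also poll the server's OpenAI-compatible /v1/models
# endpoint for up to ~60s before moving on
if command -v curl >/dev/null 2>&1; then
  for _ in $(seq 1 30); do
    curl -sf http://127.0.0.1:8000/v1/models >/dev/null && break
    sleep 2
  done
fi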
# 5) Launch the FastAPI Jarvis app on port 7860 for Hugging Face Spaces
echo "Starting Jarvis FastAPI on port 7860…"
cd /app
# Clean up the background processes whenever the script exits; with set -e a
# plain kill placed after uvicorn would be skipped if the app exits non-zero
trap 'kill $MCP_PID $LLM_PID 2>/dev/null || true' EXIT
uvicorn backend.app:app --host 0.0.0.0 --port 7860