Commit 8061397 · parent c3b0824
added error logging and handling

Files changed:
- src/gemini_routes.py      +90 -48
- src/google_api_client.py  +112 -20
- src/openai_routes.py      +83 -13
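All three files log through the stdlib root logger (logging.info / logging.error), so whichever module boots the app is expected to configure logging once at startup. A minimal sketch of such a setup, assuming nothing about the real entrypoint (level and format are illustrative, not part of this commit):

# Minimal sketch, not part of this commit: configure the root logger once so
# the logging.info / logging.error calls added in the diffs below are emitted.
import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)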
src/gemini_routes.py
CHANGED
@@ -4,6 +4,7 @@ This module provides native Gemini API endpoints that proxy directly to Google's
 without any format transformations.
 """
 import json
+import logging
 from fastapi import APIRouter, Request, Response, Depends
 
 from .auth import authenticate_user
@@ -20,15 +21,31 @@ async def gemini_list_models(request: Request, username: str = Depends(authentic
     Returns available models in Gemini format, matching the official Gemini API.
     """
 
-    models_response = {
-        "models": SUPPORTED_MODELS
-    }
-
-    return Response(
-        content=json.dumps(models_response),
-        status_code=200,
-        media_type="application/json; charset=utf-8"
-    )
+    try:
+        logging.info("Gemini models list requested")
+
+        models_response = {
+            "models": SUPPORTED_MODELS
+        }
+
+        logging.info(f"Returning {len(SUPPORTED_MODELS)} Gemini models")
+        return Response(
+            content=json.dumps(models_response),
+            status_code=200,
+            media_type="application/json; charset=utf-8"
+        )
+    except Exception as e:
+        logging.error(f"Failed to list Gemini models: {str(e)}")
+        return Response(
+            content=json.dumps({
+                "error": {
+                    "message": f"Failed to list models: {str(e)}",
+                    "code": 500
+                }
+            }),
+            status_code=500,
+            media_type="application/json"
+        )
 
 
 @router.api_route("/{full_path:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH"])
@@ -44,53 +61,78 @@ async def gemini_proxy(request: Request, full_path: str, username: str = Depends
     - etc.
     """
 
-    # Get the request body
-    post_data = await request.body()
-
-    # Determine if this is a streaming request
-    is_streaming = "stream" in full_path.lower()
-
-    # Extract model name from the path
-    # Paths typically look like: v1beta/models/gemini-1.5-pro/generateContent
-    model_name = _extract_model_from_path(full_path)
-
-    if not model_name:
-        return Response(
-            content=json.dumps({
-                "error": {
-                    "message": f"Could not extract model name from path: {full_path}",
-                    "code": 400
-                }
-            }),
-            status_code=400,
-            media_type="application/json"
-        )
-
-    # Parse the incoming request
     try:
-        if post_data:
-            incoming_request = json.loads(post_data)
-        else:
-            incoming_request = {}
-    except json.JSONDecodeError:
+        # Get the request body
+        post_data = await request.body()
+
+        # Determine if this is a streaming request
+        is_streaming = "stream" in full_path.lower()
+
+        # Extract model name from the path
+        # Paths typically look like: v1beta/models/gemini-1.5-pro/generateContent
+        model_name = _extract_model_from_path(full_path)
+
+        logging.info(f"Gemini proxy request: path={full_path}, model={model_name}, stream={is_streaming}")
+
+        if not model_name:
+            logging.error(f"Could not extract model name from path: {full_path}")
+            return Response(
+                content=json.dumps({
+                    "error": {
+                        "message": f"Could not extract model name from path: {full_path}",
+                        "code": 400
+                    }
+                }),
+                status_code=400,
+                media_type="application/json"
+            )
+
+        # Parse the incoming request
+        try:
+            if post_data:
+                incoming_request = json.loads(post_data)
+            else:
+                incoming_request = {}
+        except json.JSONDecodeError as e:
+            logging.error(f"Invalid JSON in request body: {str(e)}")
+            return Response(
+                content=json.dumps({
+                    "error": {
+                        "message": "Invalid JSON in request body",
+                        "code": 400
+                    }
+                }),
+                status_code=400,
+                media_type="application/json"
+            )
+
+        # Build the payload for Google API
+        gemini_payload = build_gemini_payload_from_native(incoming_request, model_name)
+
+        # Send the request to Google API
+        response = send_gemini_request(gemini_payload, is_streaming=is_streaming)
+
+        # Log the response status
+        if hasattr(response, 'status_code'):
+            if response.status_code != 200:
+                logging.error(f"Gemini API returned error: status={response.status_code}")
+            else:
+                logging.info(f"Successfully processed Gemini request for model: {model_name}")
+
+        return response
+
+    except Exception as e:
+        logging.error(f"Gemini proxy error: {str(e)}")
         return Response(
             content=json.dumps({
                 "error": {
-                    "message": "Invalid JSON in request body",
-                    "code": 400
+                    "message": f"Proxy error: {str(e)}",
+                    "code": 500
                 }
             }),
-            status_code=400,
+            status_code=500,
             media_type="application/json"
         )
-
-    # Build the payload for Google API
-    gemini_payload = build_gemini_payload_from_native(incoming_request, model_name)
-
-    # Send the request to Google API
-    response = send_gemini_request(gemini_payload, is_streaming=is_streaming)
-
-    return response
 
 
 def _extract_model_from_path(path: str) -> str:
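A quick way to exercise the new 400 path in gemini_proxy is to post malformed JSON; the sketch below is illustrative only, with a placeholder base URL and basic-auth credentials, since deployment details and the exact authenticate_user scheme are not part of this diff.

# Illustrative smoke test (not repo code): malformed JSON should now come back
# as the structured 400 error added above rather than an unhandled exception.
# The base URL and credentials are placeholders.
import requests

resp = requests.post(
    "http://localhost:8000/v1beta/models/gemini-1.5-pro/generateContent",
    data="{not valid json",
    headers={"Content-Type": "application/json"},
    auth=("user", "secret"),
)
print(resp.status_code)  # expected: 400
print(resp.json())       # {"error": {"message": "Invalid JSON in request body", "code": 400}}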
src/google_api_client.py
CHANGED
@@ -3,6 +3,7 @@ Google API Client - Handles all communication with Google's Gemini API.
 This module is used by both OpenAI compatibility layer and native Gemini endpoints.
 """
 import json
+import logging
 import requests
 from fastapi import Response
 from fastapi.responses import StreamingResponse
@@ -80,29 +81,79 @@ def send_gemini_request(payload: dict, is_streaming: bool = False) -> Response:
     final_post_data = json.dumps(final_payload)
 
     # Send the request
-    if is_streaming:
-        resp = requests.post(target_url, data=final_post_data, headers=request_headers, stream=True)
-        return _handle_streaming_response(resp)
-    else:
-        resp = requests.post(target_url, data=final_post_data, headers=request_headers)
-        return _handle_non_streaming_response(resp)
+    try:
+        if is_streaming:
+            resp = requests.post(target_url, data=final_post_data, headers=request_headers, stream=True)
+            return _handle_streaming_response(resp)
+        else:
+            resp = requests.post(target_url, data=final_post_data, headers=request_headers)
+            return _handle_non_streaming_response(resp)
+    except requests.exceptions.RequestException as e:
+        logging.error(f"Request to Google API failed: {str(e)}")
+        return Response(
+            content=json.dumps({"error": {"message": f"Request failed: {str(e)}"}}),
+            status_code=500,
+            media_type="application/json"
+        )
+    except Exception as e:
+        logging.error(f"Unexpected error during Google API request: {str(e)}")
+        return Response(
+            content=json.dumps({"error": {"message": f"Unexpected error: {str(e)}"}}),
+            status_code=500,
+            media_type="application/json"
+        )
 
 
 def _handle_streaming_response(resp) -> StreamingResponse:
     """Handle streaming response from Google API."""
 
+    # Check for HTTP errors before starting to stream
+    if resp.status_code != 200:
+        logging.error(f"Google API returned status {resp.status_code}: {resp.text}")
+        error_message = f"Google API error: {resp.status_code}"
+        try:
+            error_data = resp.json()
+            if "error" in error_data:
+                error_message = error_data["error"].get("message", error_message)
+        except:
+            pass
+
+        # Return error as a streaming response
+        async def error_generator():
+            error_response = {
+                "error": {
+                    "message": error_message,
+                    "type": "invalid_request_error" if resp.status_code == 404 else "api_error",
+                    "code": resp.status_code
+                }
+            }
+            yield f'data: {json.dumps(error_response)}\n\n'.encode('utf-8')
+
+        response_headers = {
+            "Content-Type": "text/event-stream",
+            "Content-Disposition": "attachment",
+            "Vary": "Origin, X-Origin, Referer",
+            "X-XSS-Protection": "0",
+            "X-Frame-Options": "SAMEORIGIN",
+            "X-Content-Type-Options": "nosniff",
+            "Server": "ESF"
+        }
+
+        return StreamingResponse(
+            error_generator(),
+            media_type="text/event-stream",
+            headers=response_headers,
+            status_code=resp.status_code
+        )
+
     async def stream_generator():
         try:
             with resp:
-                resp.raise_for_status()
                 for chunk in resp.iter_lines():
                     if chunk:
                         if not isinstance(chunk, str):
                             chunk = chunk.decode('utf-8')
 
                        if chunk.startswith('data: '):
                            chunk = chunk[len('data: '):]
 
@@ -113,18 +164,34 @@ def _handle_streaming_response(resp) -> StreamingResponse:
                                 response_chunk = obj["response"]
                                 response_json = json.dumps(response_chunk, separators=(',', ':'))
                                 response_line = f"data: {response_json}\n\n"
-                                yield response_line
+                                yield response_line.encode('utf-8')
                                 await asyncio.sleep(0)
                             else:
                                 obj_json = json.dumps(obj, separators=(',', ':'))
-                                yield f"data: {obj_json}\n\n"
+                                yield f"data: {obj_json}\n\n".encode('utf-8')
                         except json.JSONDecodeError:
                             continue
 
         except requests.exceptions.RequestException as e:
+            logging.error(f"Streaming request failed: {str(e)}")
+            error_response = {
+                "error": {
+                    "message": f"Upstream request failed: {str(e)}",
+                    "type": "api_error",
+                    "code": 502
+                }
+            }
+            yield f'data: {json.dumps(error_response)}\n\n'.encode('utf-8')
         except Exception as e:
+            logging.error(f"Unexpected error during streaming: {str(e)}")
+            error_response = {
+                "error": {
+                    "message": f"An unexpected error occurred: {str(e)}",
+                    "type": "api_error",
+                    "code": 500
+                }
+            }
+            yield f'data: {json.dumps(error_response)}\n\n'.encode('utf-8')
 
     response_headers = {
         "Content-Type": "text/event-stream",
@@ -153,20 +220,45 @@ def _handle_non_streaming_response(resp) -> Response:
             google_api_response = json.loads(google_api_response)
             standard_gemini_response = google_api_response.get("response")
             return Response(
                 content=json.dumps(standard_gemini_response),
                 status_code=200,
                 media_type="application/json; charset=utf-8"
             )
         except (json.JSONDecodeError, AttributeError) as e:
+            logging.error(f"Failed to parse Google API response: {str(e)}")
             return Response(
                 content=resp.content,
                 status_code=resp.status_code,
                 media_type=resp.headers.get("Content-Type")
             )
     else:
+        # Log the error details
+        logging.error(f"Google API returned status {resp.status_code}: {resp.text}")
+
+        # Try to parse error response and provide meaningful error message
+        try:
+            error_data = resp.json()
+            if "error" in error_data:
+                error_message = error_data["error"].get("message", f"API error: {resp.status_code}")
+                error_response = {
+                    "error": {
+                        "message": error_message,
+                        "type": "invalid_request_error" if resp.status_code == 404 else "api_error",
+                        "code": resp.status_code
+                    }
+                }
+                return Response(
+                    content=json.dumps(error_response),
+                    status_code=resp.status_code,
+                    media_type="application/json"
+                )
+        except (json.JSONDecodeError, KeyError):
+            pass
+
+        # Fallback to original response if we can't parse the error
        return Response(
            content=resp.content,
            status_code=resp.status_code,
            media_type=resp.headers.get("Content-Type")
        )
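The non-streaming error translation can be checked in isolation by handing _handle_non_streaming_response a stubbed requests.Response. The sketch below assumes the module imports as src.google_api_client (a guess at the package layout) and fakes the body via the private _content attribute, which is a testing shortcut rather than a supported API.

# Rough, assumption-laden check of the new error translation path.
import json
import requests

from src.google_api_client import _handle_non_streaming_response  # import path assumed

stub = requests.Response()
stub.status_code = 404
stub._content = json.dumps({"error": {"message": "Model not found"}}).encode("utf-8")

out = _handle_non_streaming_response(stub)
print(out.status_code)    # 404
print(out.body.decode())  # {"error": {"message": "Model not found", "type": "invalid_request_error", "code": 404}}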
src/openai_routes.py
CHANGED
@@ -49,7 +49,8 @@ async def openai_chat_completions(
             content=json.dumps({
                 "error": {
                     "message": f"Request processing failed: {str(e)}",
-                    "type": "invalid_request_error"
+                    "type": "invalid_request_error",
+                    "code": 400
                 }
             }),
             status_code=400,
@@ -76,6 +77,21 @@ async def openai_chat_completions(
                         chunk_data = chunk[6:]  # Remove 'data: ' prefix
                         gemini_chunk = json.loads(chunk_data)
 
+                        # Check if this is an error chunk
+                        if "error" in gemini_chunk:
+                            logging.error(f"Error in streaming response: {gemini_chunk['error']}")
+                            # Transform error to OpenAI format
+                            error_data = {
+                                "error": {
+                                    "message": gemini_chunk["error"].get("message", "Unknown error"),
+                                    "type": gemini_chunk["error"].get("type", "api_error"),
+                                    "code": gemini_chunk["error"].get("code")
+                                }
+                            }
+                            yield f"data: {json.dumps(error_data)}\n\n"
+                            yield "data: [DONE]\n\n"
+                            return
+
                         # Transform to OpenAI format
                         openai_chunk = gemini_stream_chunk_to_openai(
                             gemini_chunk,
@@ -95,18 +111,32 @@ async def openai_chat_completions(
                 yield "data: [DONE]\n\n"
                 logging.info(f"Completed streaming response: {response_id}")
             else:
+                # Error case - handle Response object with error
                 error_msg = "Streaming request failed"
+                status_code = 500
+
                 if hasattr(response, 'status_code'):
+                    status_code = response.status_code
+                    error_msg += f" (status: {status_code})"
+
                 if hasattr(response, 'body'):
+                    try:
+                        # Try to parse error response
+                        error_body = response.body
+                        if isinstance(error_body, bytes):
+                            error_body = error_body.decode('utf-8')
+                        error_data = json.loads(error_body)
+                        if "error" in error_data:
+                            error_msg = error_data["error"].get("message", error_msg)
+                    except:
+                        pass
 
-                logging.error(error_msg)
+                logging.error(f"Streaming request failed: {error_msg}")
                 error_data = {
                     "error": {
                         "message": error_msg,
-                        "type": "api_error"
+                        "type": "invalid_request_error" if status_code == 404 else "api_error",
+                        "code": status_code
                     }
                 }
                 yield f"data: {json.dumps(error_data)}\n\n"
@@ -116,7 +146,8 @@ async def openai_chat_completions(
             error_data = {
                 "error": {
                     "message": f"Streaming failed: {str(e)}",
-                    "type": "api_error"
+                    "type": "api_error",
+                    "code": 500
                 }
             }
             yield f"data: {json.dumps(error_data)}\n\n"
@@ -133,9 +164,45 @@ async def openai_chat_completions(
         response = send_gemini_request(gemini_payload, is_streaming=False)
 
         if isinstance(response, Response) and response.status_code != 200:
+            # Handle error responses from Google API
+            logging.error(f"Gemini API error: status={response.status_code}")
+
+            try:
+                # Try to parse the error response and transform to OpenAI format
+                error_body = response.body
+                if isinstance(error_body, bytes):
+                    error_body = error_body.decode('utf-8')
+
+                error_data = json.loads(error_body)
+                if "error" in error_data:
+                    # Transform Google API error to OpenAI format
+                    openai_error = {
+                        "error": {
+                            "message": error_data["error"].get("message", f"API error: {response.status_code}"),
+                            "type": error_data["error"].get("type", "invalid_request_error" if response.status_code == 404 else "api_error"),
+                            "code": error_data["error"].get("code", response.status_code)
+                        }
+                    }
+                    return Response(
+                        content=json.dumps(openai_error),
+                        status_code=response.status_code,
+                        media_type="application/json"
+                    )
+            except (json.JSONDecodeError, UnicodeDecodeError):
+                pass
+
+            # Fallback error response
+            return Response(
+                content=json.dumps({
+                    "error": {
+                        "message": f"API error: {response.status_code}",
+                        "type": "invalid_request_error" if response.status_code == 404 else "api_error",
+                        "code": response.status_code
+                    }
+                }),
+                status_code=response.status_code,
+                media_type="application/json"
+            )
 
         try:
             # Parse Gemini response and transform to OpenAI format
@@ -151,7 +218,8 @@ async def openai_chat_completions(
             content=json.dumps({
                 "error": {
                     "message": f"Failed to process response: {str(e)}",
-                    "type": "api_error"
+                    "type": "api_error",
+                    "code": 500
                 }
             }),
             status_code=500,
@@ -163,7 +231,8 @@ async def openai_chat_completions(
             content=json.dumps({
                 "error": {
                     "message": f"Request failed: {str(e)}",
-                    "type": "api_error"
+                    "type": "api_error",
+                    "code": 500
                 }
             }),
             status_code=500,
@@ -225,7 +294,8 @@ async def openai_list_models(username: str = Depends(authenticate_user)):
             content=json.dumps({
                 "error": {
                     "message": f"Failed to list models: {str(e)}",
-                    "type": "api_error"
+                    "type": "api_error",
+                    "code": 500
                 }
             }),
             status_code=500,
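From an OpenAI-compatible client's point of view, a failed stream now carries a single error event followed by the [DONE] terminator. A hypothetical consumer could handle it like this (the sample event is invented for illustration):

# Hypothetical client-side handling of the SSE error chunk emitted above.
import json

raw_events = [
    'data: {"error": {"message": "Model not found", "type": "invalid_request_error", "code": 404}}',
    "data: [DONE]",
]

for line in raw_events:
    payload = line[len("data: "):]
    if payload == "[DONE]":
        break
    event = json.loads(payload)
    if "error" in event:
        print(f"stream failed: {event['error']['message']} (code {event['error']['code']})")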