MiklX committed on
Commit
8c29ba3
·
1 Parent(s): 68d114f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -38
app.py CHANGED
@@ -3,41 +3,27 @@ import random
3
  import string
4
  import time
5
  from typing import Any
6
-
7
- #new
8
- import freeGPT
9
  from asyncio import run
10
- #/new
11
-
12
  from flask import Flask, request
13
  from flask_cors import CORS
14
-
15
  from g4f import ChatCompletion, Provider
16
-
17
  app = Flask(__name__)
18
  CORS(app)
19
-
20
  @app.route("/")
21
  def main():
22
  return """Just iqai.ru for more!!!"""
23
-
24
  @app.route("/chat/completions", methods=["POST"])
25
- def chat_completions():
26
- model = request.get_json().get("model", "gpt-3.5-turbo")
27
- stream = request.get_json().get("stream", False)
28
- messages = request.get_json().get("messages")
29
-
30
- #old
31
- #response = ChatCompletion.create(model=model, messages=messages)
32
- #/old
33
-
34
- #new
35
- response = freeGPT.gpt3.Completion().create(messages)
36
- #/new
37
-
38
  completion_id = "".join(random.choices(string.ascii_letters + string.digits, k=28))
39
  completion_timestamp = int(time.time())
40
-
41
  if not stream:
42
  return {
43
  "id": f"chatcmpl-{completion_id}",
@@ -51,16 +37,15 @@ def chat_completions():
51
  "role": "assistant",
52
  "content": response,
53
  },
54
- "finish_reason": "stop",
55
  }
56
  ],
57
  "usage": {
58
- "prompt_tokens": None,
59
- "completion_tokens": None,
60
- "total_tokens": None,
61
  },
62
  }
63
-
64
  def streaming():
65
  for chunk in response:
66
  completion_data = {
@@ -74,15 +59,13 @@ def chat_completions():
74
  "delta": {
75
  "content": chunk,
76
  },
77
- "finish_reason": None,
78
  }
79
  ],
80
  }
81
-
82
  content = json.dumps(completion_data, separators=(",", ":"))
83
- yield f"data: {content}\n\n"
84
  time.sleep(0.1)
85
-
86
  end_completion_data: dict[str, Any] = {
87
  "id": f"chatcmpl-{completion_id}",
88
  "object": "chat.completion.chunk",
@@ -92,15 +75,12 @@ def chat_completions():
92
  {
93
  "index": 0,
94
  "delta": {},
95
- "finish_reason": "stop",
96
  }
97
  ],
98
  }
99
  content = json.dumps(end_completion_data, separators=(",", ":"))
100
- yield f"data: {content}\n\n"
101
-
102
  return app.response_class(streaming(), mimetype="text/event-stream")
103
-
104
-
105
  if __name__ == "__main__":
106
- app.run(host="0.0.0.0", port=7860, debug=False)
 
3
  import string
4
  import time
5
  from typing import Any
6
+ from freeGPT import gpt3
 
 
7
  from asyncio import run
 
 
8
  from flask import Flask, request
9
  from flask_cors import CORS
 
10
  from g4f import ChatCompletion, Provider
 
11
  app = Flask(__name__)
12
  CORS(app)
 
13
@app.route("/")
def main():
    """Landing page for GET /; points visitors at iqai.ru."""
    return """Just iqai.ru for more!!!"""
 
16
  @app.route("/chat/completions", methods=["POST"])
17
+ def chat_completions():
18
+ data = request.get_json()
19
+ model = data.get("model", "gpt-3.5-turbo")
20
+ stream = data.get("stream", False)
21
+ messages = data.get("messages")
22
+ if messages is None:
23
+ return {"error": "No messages provided"}, 400
24
+ response = gpt3.Completion.create(prompt=messages)
 
 
 
 
 
25
  completion_id = "".join(random.choices(string.ascii_letters + string.digits, k=28))
26
  completion_timestamp = int(time.time())
 
27
  if not stream:
28
  return {
29
  "id": f"chatcmpl-{completion_id}",
 
37
  "role": "assistant",
38
  "content": response,
39
  },
40
+ "finish_reason": "stop",
41
  }
42
  ],
43
  "usage": {
44
+ "prompt_tokens": None,
45
+ "completion_tokens": None,
46
+ "total_tokens": None,
47
  },
48
  }
 
49
  def streaming():
50
  for chunk in response:
51
  completion_data = {
 
59
  "delta": {
60
  "content": chunk,
61
  },
62
+ "finish_reason": None,
63
  }
64
  ],
65
  }
 
66
  content = json.dumps(completion_data, separators=(",", ":"))
67
+ yield f"data: {content}\n\n"
68
  time.sleep(0.1)
 
69
  end_completion_data: dict[str, Any] = {
70
  "id": f"chatcmpl-{completion_id}",
71
  "object": "chat.completion.chunk",
 
75
  {
76
  "index": 0,
77
  "delta": {},
78
+ "finish_reason": "stop",
79
  }
80
  ],
81
  }
82
  content = json.dumps(end_completion_data, separators=(",", ":"))
83
+ yield f"data: {content}\n\n"
 
84
  return app.response_class(streaming(), mimetype="text/event-stream")
 
 
85
  if __name__ == "__main__":
86
+ app.run(host="0.0.0.0", port=7860, debug=False)