Commit 1e99029
Parent(s): none (initial commit)
ChatGPT: Initial.
Files changed:
- .gitattributes +36 -0
- Dockerfile +20 -0
- README.md +56 -0
- app.py +30 -0
- assets/images/9299765.jpg +3 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,20 @@
+#
+# SPDX-FileCopyrightText: Hadad <[email protected]>
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# Use a specific container image for the application
+FROM hadadrjt/ai:latest
+
+# Set the main working directory inside the container
+WORKDIR /usr/src/app
+
+# Copy all files and directories from the build context on the
+# host machine into the working directory in the container
+COPY . .
+
+# Open the port so the application can be accessed
+EXPOSE 7860
+
+# Define the default command to start the application
+CMD ["python", "app.py"]
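The EXPOSE 7860 line matches app_port in the README and Gradio's default server port. For the container port mapping to be reachable, the Gradio server also has to listen on all interfaces rather than only on localhost; the hadadrjt/ai base image is presumed to take care of this, but it can also be pinned explicitly in the launch call. A minimal sketch, assuming only the standard gradio API (the placeholder chat handler below is not part of this commit):

    import gradio as gr

    # Placeholder echo handler; the real Space wires the UI to an
    # OpenAI-compatible backend via gr.load_chat (see app.py below).
    def respond(message, history):
        return f"You said: {message}"

    demo = gr.ChatInterface(respond, type="messages")
    # Listen on all interfaces, on the same port the Dockerfile exposes.
    demo.launch(server_name="0.0.0.0", server_port=7860)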
README.md
ADDED
@@ -0,0 +1,56 @@
+---
+title: ChatGPT
+short_description: 'OpenAI: GPT-4.1 (Nano)'
+emoji: ⚡
+colorFrom: purple
+colorTo: green
+sdk: docker
+app_port: 7860
+pinned: false
+models:
+# Used to promote this Hugging Face Space
+- hadadrjt/JARVIS
+- agentica-org/DeepCoder-14B-Preview
+- agentica-org/DeepSWE-Preview
+- fka/awesome-chatgpt-prompts
+- black-forest-labs/FLUX.1-Kontext-dev
+- ChatDOC/OCRFlux-3B
+- deepseek-ai/DeepSeek-R1
+- deepseek-ai/DeepSeek-R1-0528
+- deepseek-ai/DeepSeek-R1-Distill-Llama-70B
+- deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
+- deepseek-ai/DeepSeek-R1-0528-Qwen3-8B
+- deepseek-ai/DeepSeek-V3-0324
+- google/gemma-3-1b-it
+- google/gemma-3-27b-it
+- google/gemma-3-4b-it
+- google/gemma-3n-E4B-it
+- google/gemma-3n-E4B-it-litert-preview
+- google/medsiglip-448
+- kyutai/tts-1.6b-en_fr
+- meta-llama/Llama-3.1-8B-Instruct
+- meta-llama/Llama-3.2-3B-Instruct
+- meta-llama/Llama-3.3-70B-Instruct
+- meta-llama/Llama-4-Maverick-17B-128E-Instruct
+- meta-llama/Llama-4-Scout-17B-16E-Instruct
+- microsoft/Phi-4-mini-instruct
+- mistralai/Devstral-Small-2505
+- mistralai/Mistral-Small-3.1-24B-Instruct-2503
+- openai/webgpt_comparisons
+- openai/whisper-large-v3-turbo
+- openai/gpt-oss-120b
+- openai/gpt-oss-20b
+- Qwen/QwQ-32B
+- Qwen/Qwen2.5-VL-32B-Instruct
+- Qwen/Qwen2.5-VL-3B-Instruct
+- Qwen/Qwen2.5-VL-72B-Instruct
+- Qwen/Qwen3-235B-A22B
+- THUDM/GLM-4.1V-9B-Thinking
+- tngtech/DeepSeek-TNG-R1T2-Chimera
+- moonshotai/Kimi-K2-Instruct
+- Qwen/Qwen3-235B-A22B-Instruct-2507
+- Qwen/Qwen3-Coder-480B-A35B-Instruct
+- Qwen/Qwen3-235B-A22B-Thinking-2507
+- zai-org/GLM-4.5
+- zai-org/GLM-4.5-Air
+---
app.py
ADDED
@@ -0,0 +1,30 @@
+#
+# SPDX-FileCopyrightText: Hadad <[email protected]>
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import os  # Used for accessing environment variables
+import gradio as gr  # Used to create the user interface
+
+# Gradio user interface
+gr.load_chat(
+    os.getenv("OPENAI_API_BASE_URL"),  # Endpoint
+    token=os.getenv("OPENAI_API_KEY"),  # API key
+    model="gpt-4.1-nano",  # Model
+    chatbot=gr.Chatbot(
+        label="ChatGPT",  # Title
+        type="messages",  # OpenAI-style messages format
+        show_copy_button=True,  # Allow users to copy responses
+        scale=1  # Standard display scaling
+    ),
+    file_types=["image"],  # Multimodal image input
+    examples=[
+        ["Please introduce yourself."],
+        [{"text": "Explain this image.",
+          "files": ["assets/images/9299765.jpg"]}],
+        ["Give me a short introduction to large language models."],
+        ["Explain quantum computers."]
+    ],  # Provide sample inputs for users to try
+    cache_examples=False,  # Ensure responses are always fresh
+    show_api=False  # Disable the Gradio API page
+).launch()  # Start the app
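app.py reads the backend endpoint and key from the OPENAI_API_BASE_URL and OPENAI_API_KEY environment variables (presumably injected as Space secrets) and lets gr.load_chat build the whole chat UI around an OpenAI-compatible chat-completions endpoint. For reference, a rough text-only sketch of what that call amounts to, written against the openai client package; streaming, image uploads, and the example prompts are omitted, and the openai package itself is an assumption of this sketch rather than a dependency declared in the commit:

    import os

    import gradio as gr
    from openai import OpenAI  # assumed available; not installed by this commit

    # Same environment variables app.py reads; values are expected in the environment.
    client = OpenAI(
        base_url=os.getenv("OPENAI_API_BASE_URL"),
        api_key=os.getenv("OPENAI_API_KEY"),
    )

    def respond(message, history):
        # With type="messages", history is already a list of OpenAI-style
        # {"role": ..., "content": ...} dicts, so it can be forwarded directly.
        messages = history + [{"role": "user", "content": message}]
        reply = client.chat.completions.create(model="gpt-4.1-nano", messages=messages)
        return reply.choices[0].message.content

    gr.ChatInterface(respond, type="messages").launch()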
assets/images/9299765.jpg
ADDED
Binary image file tracked with Git LFS (pointer file added, content not shown).