Bradarr committed
Commit 971430d · verified · 1 Parent(s): 37e38a4

Update app.py

Files changed (1):
app.py +3 -3
app.py CHANGED
@@ -37,7 +37,8 @@ if not os.path.exists("./models/google.gemma-3-12b-pt.Q4_K_M.gguf"):
     download_model("DevQuasar/google.gemma-3-12b-pt-GGUF", "google.gemma-3-12b-pt.Q4_K_M.gguf")
 if not os.path.exists("./models/google.gemma-3-4b-pt.Q4_K_M.gguf"): # Example from original, in case needed.
     download_model("DevQuasar/google.gemma-3-4b-pt-GGUF", "google.gemma-3-4b-pt.Q4_K_M.gguf")
-
+if not os.path.exists("./models/google.gemma-3-27b-pt.Q4_K_M.gguf"): # Example from original, in case needed.
+    download_model("DevQuasar/google.gemma-3-27b-pt-GGUF", "google.gemma-3-27b-pt.Q4_K_M.gguf")
 
 # Set the title and description
 title = "Gemma Text Generation"
@@ -87,8 +88,6 @@ def generate_text(
         flash_attn=True,
         n_gpu_layers=999, # Adjust based on your GPU availability
         n_ctx=4096, # Context window size. Can increase.
-        n_threads=4, # Adjust as needed for performance.
-        n_threads_batch=4,
         verbose=False #Reduce unnecessary verbosity
     )
     llm_model = model
@@ -144,6 +143,7 @@ with gr.Blocks(theme="Ocean", title=title) as demo:
                 "google.gemma-3-1b-pt.Q4_K_M.gguf",
                 "google.gemma-3-4b-pt.Q4_K_M.gguf",
                 "google.gemma-3-12b-pt.Q4_K_M.gguf",
+                "google.gemma-3-27b-pt.Q4_K_M.gguf",
                 # Add other models as needed and downloaded
             ],
             value="google.gemma-3-1b-pt.Q4_K_M.gguf", # Default model
 
 
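The keyword arguments in the second hunk look like llama-cpp-python's Llama constructor. A minimal sketch of how the selected GGUF is presumably loaded after this change (the model_path is illustrative); removing n_threads and n_threads_batch leaves thread selection to llama.cpp's defaults.

from llama_cpp import Llama  # llama-cpp-python

# Sketch of the loader after this commit: thread counts are no longer pinned,
# so llama.cpp chooses them based on the host CPU.
model = Llama(
    model_path="./models/google.gemma-3-1b-pt.Q4_K_M.gguf",  # illustrative; the app takes this from the dropdown
    flash_attn=True,
    n_gpu_layers=999,  # offload as many layers as the GPU can hold
    n_ctx=4096,        # context window size
    verbose=False,
)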
 