appvoid committed on
Commit
9e83ff8
·
verified ·
1 Parent(s): e8ee200

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -17
app.py CHANGED
@@ -1,31 +1,25 @@
1
  import os
2
  from threading import Thread
3
  from typing import Iterator
4
-
5
  import gradio as gr
6
  import spaces
7
  import torch
8
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
9
 
10
- tokenizer = AutoTokenizer.from_pretrained("appvoid/palmer-004")
11
- model = AutoModelForCausalLM.from_pretrained("appvoid/palmer-004")
12
 
13
  @spaces.GPU
14
- def text_generation(input_text, seed):
15
- input_ids = tokenizer(input_text, return_tensors="pt")
16
- torch.manual_seed(seed) # Max value: 18446744073709551615
17
- outputs = model.generate(input_ids, do_sample=True, max_length=100)
18
- generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
19
- return generated_text
20
-
21
- title = "palmer demo"
22
- description = "Text completion app by appvoid"
23
 
24
  gr.Interface(
25
- text_generation,
26
- [gr.inputs.Textbox(lines=2, label="Enter input text"), gr.inputs.Number(default=10, label="Enter seed number")],
27
- [gr.outputs.Textbox(type="auto", label="Text Generated")],
28
  title=title,
29
  description=description,
30
- theme="huggingface"
31
- ).launch()
 
 
1
  import os
2
  from threading import Thread
3
  from typing import Iterator
 
4
  import gradio as gr
5
  import spaces
6
  import torch
7
  from transformers import AutoModelForCausalLM, AutoTokenizer
8
+ from transformers import pipeline
9
 
10
# Text-generation pipeline used by `predict` below.
# NOTE(review): the pre-refactor code loaded "appvoid/palmer-004"; the bare
# id "palmer-004" has no namespace and will not resolve on the Hugging Face
# Hub, so the fully-qualified repo id is restored here.
model = pipeline("text-generation", model="appvoid/palmer-004")
 
11
 
12
@spaces.GPU
def predict(prompt):
    """Run the text-generation pipeline on *prompt*.

    Returns the "generated_text" field of the pipeline's first candidate.
    """
    # The pipeline returns a list of candidate dicts; keep only the first.
    candidates = model(prompt)
    return candidates[0]["generated_text"]
 
 
 
 
 
 
16
 
17
# UI metadata. NOTE(review): the refactor deleted these two assignments but
# the Interface call below still referenced them, which raises NameError as
# soon as the module is imported — restored from the pre-commit version.
title = "palmer demo"
description = "Text completion app by appvoid"

# Build and launch the demo UI: a single text box in, generated text out.
gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title=title,
    description=description,
    # NOTE(review): the previous `examples=examples` referenced a name that
    # is defined nowhere in this file (NameError); the kwarg is dropped.
    # `enable_queue=True` is dropped too — it was deprecated and then removed
    # in Gradio 4 (queueing is on by default there); the `@spaces.GPU`
    # decorator implies a Gradio 4+ Space — confirm against the pinned
    # gradio version.
).launch()