import gradio as gr
import spaces
from transformers import pipeline

# Load the text-generation pipeline for the appvoid/text-arco model.
model = pipeline("text-generation", model="appvoid/text-arco")


@spaces.GPU
def predict(prompt):
    # Generate a short completion; do_sample=True is required for
    # temperature to actually take effect during decoding.
    completion = model(
        prompt,
        max_new_tokens=64,
        do_sample=True,
        temperature=0.3,
    )[0]["generated_text"]
    return completion


gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="text arco",
).launch()
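
# A minimal sketch for querying the running Space programmatically with
# gradio_client (the Space id "appvoid/text-arco" and the example prompt
# are assumptions; api_name="/predict" is the default endpoint exposed by
# gr.Interface for the predict function above):
#
#   from gradio_client import Client
#   client = Client("appvoid/text-arco")
#   print(client.predict("Once upon a time", api_name="/predict"))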