|
import streamlit as st |
|
from transformers import AutoModel, AutoTokenizer, trainer_utils |
|
|
|
|
|
device = "cpu"  # inference device; no CUDA detection is attempted here


@st.cache_resource
def _load_model_and_tokenizer():
    """Load the GPTSAN-japanese model and tokenizer once per server process.

    Streamlit re-executes the entire script on every widget interaction;
    ``st.cache_resource`` ensures the (large) pretrained weights are loaded
    only once instead of on every rerun.

    Returns:
        tuple: ``(model, tokenizer)`` ready for inference on ``device``.
    """
    loaded_model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
    loaded_tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
    return loaded_model, loaded_tokenizer


model, tokenizer = _load_model_and_tokenizer()
|
|
|
|
|
def generate_text(input_text, max_tokens=50, seed=30):
    """Generate a Japanese text continuation of ``input_text``.

    Args:
        input_text: Prompt string the model continues from.
        max_tokens: Maximum number of *new* tokens to generate.
        seed: RNG seed set before generation; the fixed default (30, the
            original hard-coded value) keeps output reproducible across reruns.

    Returns:
        str: The decoded generation (prompt plus continuation), with special
        tokens (pad/eos markers) stripped.
    """
    # Seed all RNGs before generation so any sampling is reproducible.
    trainer_utils.set_seed(seed)

    x_token = tokenizer(input_text, return_tensors="pt")
    input_ids = x_token.input_ids.to(device)

    gen_token = model.generate(input_ids, max_new_tokens=max_tokens)

    # skip_special_tokens keeps pad/eos markers out of the user-facing text.
    return tokenizer.decode(gen_token[0], skip_special_tokens=True)
|
|
|
|
|
def main():
    """Render the Streamlit UI and run text generation on button press."""
    st.title("Japanese Text Generator")

    prompt = st.text_area("Enter the starting text:", "織田信長は、")
    token_limit = st.slider("Max Tokens", 1, 100, 50)

    # Nothing to do until the user explicitly asks for a generation.
    if not st.button("Generate Text"):
        return

    result = generate_text(prompt, token_limit)
    st.text("Generated Text:")
    st.write(result)
|
|
|
# Entry point when executed as a script (e.g. via `streamlit run <file>`).
if __name__ == "__main__":

    main()
|
|