# (removed file-viewer extraction artifacts: file-size banner, git-blame hashes, line-number gutter)
import os
import subprocess
import sys

# Install runtime dependencies.
# Fixes vs. original: use subprocess.run with an argument list (shell=False)
# instead of os.system with a shell string, and drop the second, fully
# redundant `pip install transformers` call (transformers is already in the
# first install). `sys.executable -m pip` guarantees we install into the
# running interpreter's environment.
# NOTE(review): installing packages at import time is unusual — a
# requirements.txt would be the conventional fix; kept for compatibility.
result = subprocess.run(
    [sys.executable, "-m", "pip", "install", "transformers", "torch", "psutil"],
    check=False,  # best-effort, matching os.system's "ignore failure" behavior
).returncode  # keep `result` as an int exit code, like os.system returned

from transformers import AutoModel, AutoTokenizer, trainer_utils
import gradio as gr
import psutil

# Inference runs on CPU only.
device = "cpu"
# Japanese GPTSAN model and its matching tokenizer.
model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
# Fix the RNG seed so text generation is reproducible across runs.
trainer_utils.set_seed(30)
def get_memory_usage():
    """Return this process's resident memory usage as a formatted string."""
    rss_bytes = psutil.Process().memory_info().rss
    rss_mb = rss_bytes / (1024 * 1024)  # bytes -> megabytes
    return f"Memory Usage: {rss_mb:.2f} MB"
def generate_text(input_text):
    """Generate Japanese text continuing *input_text* with the GPTSAN model.

    Args:
        input_text: Prompt string used as the generation prefix.

    Returns:
        The decoded generated text (prompt tokens included in the decode).
    """
    # Fix vs. original: removed the unused local `usag = get_memory_usage()`
    # — its result was discarded, making the psutil call pure waste.
    # GPTSAN uses a prefix-LM input format: the prompt goes in prefix_text
    # and the main text argument is left empty.
    x_token = tokenizer("", prefix_text=input_text, return_tensors="pt")
    input_ids = x_token.input_ids.to(device)
    token_type_ids = x_token.token_type_ids.to(device)
    gen_token = model.generate(
        input_ids, token_type_ids=token_type_ids, max_new_tokens=150
    )
    return tokenizer.decode(gen_token[0])
# Fix vs. original: gr.inputs.Textbox / gr.outputs.Textbox were deprecated in
# Gradio 2.x and removed in Gradio 3+; components now live directly on the
# gr module (gr.Textbox).
input_text = gr.Textbox(lines=5, label="Input Text")
output_text = gr.Textbox(label="Generated Text")

interface = gr.Interface(
    fn=generate_text,
    inputs=input_text,
    outputs=output_text,
    # NOTE(review): the title shows memory usage sampled once at build time,
    # not a live reading — confirm this is intentional.
    title=get_memory_usage(),
    description="Enter a prompt in Japanese to generate text.",
)
interface.launch()
# (removed trailing file-viewer gutter artifact)