# --- Hugging Face file-viewer residue from the page this file was copied from
# (not part of the program; kept as comments so the file parses) ---
# OneclickAI's picture
# Upload 3 files
# 39c2220 verified
# raw / history blame / 3.77 kB
# test.py (์˜ค๋ฅ˜ ์ˆ˜์ • ์ตœ์ข… ์ฝ”๋“œ)
import numpy as np
import tensorflow as tf
from tensorflow import keras
# from_pretrained_keras ๋Œ€์‹  hf_hub_download๋ฅผ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.
from huggingface_hub import hf_hub_download
print("TensorFlow ๋ฒ„์ „:", tf.__version__)

# 1. Download the model files from the Hugging Face Hub, then load them with Keras.
REPO_ID = "OneclickAI/LSTM_GUE_test_Model"
print(f"\n'{REPO_ID}' ์ €์žฅ์†Œ์—์„œ ๋ชจ๋ธ ํŒŒ์ผ์˜ ์œ„์น˜๋ฅผ ํ™•์ธํ•ฉ๋‹ˆ๋‹ค...")
try:
    # Step 1: hf_hub_download returns the local cache path of each file.
    # If a file was already downloaded, the download is skipped and the
    # cached path is returned immediately.
    print("LSTM ๋ชจ๋ธ ๊ฒฝ๋กœ ํ™•์ธ ์ค‘...")
    lstm_model_path = hf_hub_download(repo_id=REPO_ID, filename="lstm_model.keras")
    print(f"LSTM ๋ชจ๋ธ ํŒŒ์ผ ์œ„์น˜: {lstm_model_path}")

    print("GRU ๋ชจ๋ธ ๊ฒฝ๋กœ ํ™•์ธ ์ค‘...")
    gru_model_path = hf_hub_download(repo_id=REPO_ID, filename="gru_model.keras")
    print(f"GRU ๋ชจ๋ธ ํŒŒ์ผ ์œ„์น˜: {gru_model_path}")

    # Step 2: load the downloaded .keras files with the standard Keras loader.
    print("\nKeras๋กœ ๋ชจ๋ธ์„ ๋กœ๋“œํ•ฉ๋‹ˆ๋‹ค...")
    lstm_model = keras.models.load_model(lstm_model_path)
    gru_model = keras.models.load_model(gru_model_path)
    print("๋ชจ๋ธ์„ ์„ฑ๊ณต์ ์œผ๋กœ ๋กœ๋“œํ–ˆ์Šต๋‹ˆ๋‹ค.")
except Exception as e:
    # Broad catch is acceptable here: this is the script's top-level boundary
    # and any failure (network, missing file, bad repo id) is fatal.
    print(f"๋ชจ๋ธ ๋กœ๋”ฉ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {e}")
    print("์ธํ„ฐ๋„ท ์—ฐ๊ฒฐ ๋ฐ ์ €์žฅ์†Œ ID, ํŒŒ์ผ๋ช…์„ ํ™•์ธํ•ด์ฃผ์„ธ์š”.")
    # BUGFIX: the original called exit(), which exits with status 0 even on
    # failure (and the exit() builtin is absent under `python -S`).  Raise
    # SystemExit(1) so callers/CI can detect the error.
    raise SystemExit(1)

# Word index for the IMDB dataset ('word' -> integer id).
word_index = keras.datasets.imdb.get_word_index()
# 2. Reviews to classify: one clearly positive and one clearly negative sample.
review1 = "This movie was fantastic and wonderful. I really enjoyed it."
review2 = "It was a complete waste of time. The plot was terrible and the acting was bad."
# 3. Text preprocessing helper
def preprocess_text(text, word_index, maxlen=256, index_from=3, start_char=1, oov_char=2):
    """Convert raw text into a padded integer sequence for the IMDB models.

    Args:
        text: Raw review string.
        word_index: Mapping of word -> integer id, as returned by
            ``keras.datasets.imdb.get_word_index()``.
        maxlen: Output sequence length (pre-padded / pre-truncated).
        index_from: Offset added to every known word id.  ``imdb.load_data``
            shifts all word ids by 3 during training so that 0/1/2 can serve
            as the pad/start/unknown markers, so an IMDB-trained model
            expects the same shift at inference time.
        start_char: Id prepended to every sequence (1 in the IMDB encoding).
        oov_char: Id for out-of-vocabulary words (2 in the IMDB encoding).

    Returns:
        ``np.ndarray`` of shape ``(1, maxlen)``, dtype int32.
    """
    # Lowercase and split on whitespace.  NOTE(review): punctuation stays
    # attached to words ("it." != "it"), so such tokens become oov_char.
    tokens = text.lower().split()

    # BUGFIX: the original looked ids up with word_index.get(word, 2) but
    # never applied the +index_from offset (nor the start token) used by
    # imdb.load_data, so every word id was shifted relative to what the
    # model saw in training.  Unknown words map to oov_char directly.
    ids = [start_char]
    for word in tokens:
        idx = word_index.get(word)
        ids.append(oov_char if idx is None else idx + index_from)

    # Replicate keras pad_sequences defaults without the deprecated API:
    # pre-truncate (keep the last maxlen ids), then pre-pad with 0.
    ids = ids[-maxlen:]
    padded = np.zeros((1, maxlen), dtype=np.int32)
    padded[0, maxlen - len(ids):] = ids
    return padded
# 4. ๋ชจ๋ธ ์˜ˆ์ธก ๋ฐ ๊ฒฐ๊ณผ ์ถœ๋ ฅ ํ•จ์ˆ˜
def predict_review(review_text, model, model_name):
"""
์ „์ฒ˜๋ฆฌ๋œ ํ…์ŠคํŠธ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๊ฐ์„ฑ ๋ถ„์„์„ ์ˆ˜ํ–‰ํ•˜๊ณ  ๊ฒฐ๊ณผ๋ฅผ ์ถœ๋ ฅํ•ฉ๋‹ˆ๋‹ค.
"""
# ๋ฌธ์žฅ ์ „์ฒ˜๋ฆฌ
processed_review = preprocess_text(review_text, word_index)
# ์˜ˆ์ธก ์ˆ˜ํ–‰
prediction = model.predict(processed_review, verbose=0) # ์˜ˆ์ธก ์‹œ ๋กœ๊ทธ ์ถœ๋ ฅ์„ ๋”
positive_probability = prediction[0][0] * 100
print(f"--- {model_name} ๋ชจ๋ธ ์˜ˆ์ธก ๊ฒฐ๊ณผ ---")
print(f"๋ฆฌ๋ทฐ: '{review_text}'")
print(f"๊ธ์ • ํ™•๋ฅ : {positive_probability:.2f}%")
if positive_probability > 50:
print("๊ฒฐ๊ณผ: ๊ธ์ •์ ์ธ ๋ฆฌ๋ทฐ์ž…๋‹ˆ๋‹ค.")
else:
print("๊ฒฐ๊ณผ: ๋ถ€์ •์ ์ธ ๋ฆฌ๋ทฐ์ž…๋‹ˆ๋‹ค.")
print("-" * 30)
# 5. ๊ฐ ๋ฆฌ๋ทฐ์— ๋Œ€ํ•ด ๋‘ ๋ชจ๋ธ๋กœ ์˜ˆ์ธก ์ˆ˜ํ–‰
print("\n" + "="*40)
print("์ฒซ ๋ฒˆ์งธ ๋ฆฌ๋ทฐ ์˜ˆ์ธก ์‹œ์ž‘")
print("="*40)
predict_review(review1, lstm_model, "LSTM")
predict_review(review1, gru_model, "GRU")
print("\n" + "="*40)
print("๋‘ ๋ฒˆ์งธ ๋ฆฌ๋ทฐ ์˜ˆ์ธก ์‹œ์ž‘")
print("="*40)
predict_review(review2, lstm_model, "LSTM")
predict_review(review2, gru_model, "GRU")