japanese-clip-stair / test_model.py
#!/usr/bin/env python3
"""
Japanese CLIP test script
"""
import torch
from transformers import AutoTokenizer, AutoModel


def test_model_loading():
    """Model loading test."""
    print("Testing model loading...")
    try:
        # Load the model and tokenizer from the current directory
        model = AutoModel.from_pretrained(".", trust_remote_code=True)
        tokenizer = AutoTokenizer.from_pretrained(".")
        print("✓ Model and tokenizer loaded successfully")

        # Test with dummy text data ("test", "dog", "cat")
        texts = ["テスト", "犬", "猫"]
        text_inputs = tokenizer(texts, padding=True, return_tensors="pt")

        # Dummy image data (random tensor in place of a real image)
        dummy_image = torch.randn(1, 3, 224, 224)

        with torch.no_grad():
            outputs = model(
                pixel_values=dummy_image,
                input_ids=text_inputs['input_ids'],
                attention_mask=text_inputs['attention_mask']
            )

        print("✓ Forward pass successful")
        print(f" - Image features shape: {outputs['image_features'].shape}")
        print(f" - Text features shape: {outputs['text_features'].shape}")
        print(f" - Logits shape: {outputs['logits_per_image'].shape}")
        print(f" - Temperature: {outputs['temperature'].item():.4f}")
        return True
    except Exception as e:
        print(f"✗ Test failed: {e}")
        return False


if __name__ == "__main__":
    success = test_model_loading()
    if success:
        print("\n🎉 All tests passed!")
    else:
        print("\n❌ Tests failed!")