#!/usr/bin/env python3
"""
Japanese CLIP test script
"""
import torch
from transformers import AutoTokenizer, AutoModel


def test_model_loading():
    """Test that the model and tokenizer load and can run a forward pass."""
    print("Testing model loading...")
    try:
        # Load the model and tokenizer from the current directory
        model = AutoModel.from_pretrained(".", trust_remote_code=True)
        tokenizer = AutoTokenizer.from_pretrained(".")
        print("✓ Model and tokenizer loaded successfully")

        # Test with dummy Japanese text data ("test", "dog", "cat")
        texts = ["テスト", "犬", "猫"]
        text_inputs = tokenizer(texts, padding=True, return_tensors="pt")

        # Dummy image data (batch of 1, 3 channels, 224x224)
        dummy_image = torch.randn(1, 3, 224, 224)

        with torch.no_grad():
            outputs = model(
                pixel_values=dummy_image,
                input_ids=text_inputs['input_ids'],
                attention_mask=text_inputs['attention_mask']
            )
print(f"✓ Forward pass successful")
print(f" - Image features shape: {outputs['image_features'].shape}")
print(f" - Text features shape: {outputs['text_features'].shape}")
print(f" - Logits shape: {outputs['logits_per_image'].shape}")
print(f" - Temperature: {outputs['temperature'].item():.4f}")
        return True
    except Exception as e:
        print(f"✗ Test failed: {e}")
        return False


if __name__ == "__main__":
    success = test_model_loading()
    if success:
        print("\n🎉 All tests passed!")
    else:
        print("\n❌ Tests failed!")