import torch


def check_gpu(size: int = 1000) -> None:
    """Report CUDA/GPU availability and run a small matmul sanity check.

    Prints the GPU name and total memory when CUDA is available, then
    multiplies a random ``size x size`` matrix by itself on the selected
    device to confirm tensor computation works.

    Args:
        size: Edge length of the square test matrices (default 1000).
    """
    print("🔍 Checking CUDA and GPU details...\n")

    # Check if CUDA is available
    if torch.cuda.is_available():
        device = torch.device("cuda")
        print("✅ CUDA is available.")
        print(f"🖥️ GPU Name: {torch.cuda.get_device_name(0)}")
        print(f"📊 GPU Memory: {round(torch.cuda.get_device_properties(0).total_memory / 1024**3, 2)} GB")
        # Allocate directly on the GPU — avoids a CPU allocation + host-to-device copy
        # that `torch.rand(...).to(device)` would incur.
        x = torch.rand(size, size, device=device)
        y = torch.mm(x, x)
        print(f"🚀 Tensor computation successful on GPU! Tensor shape: {y.shape}")
    else:
        print("❌ CUDA is NOT available. Using CPU fallback.")
        x = torch.rand(size, size)
        y = torch.mm(x, x)
        print(f"✅ CPU computation done. Tensor shape: {y.shape}")


if __name__ == "__main__":
    check_gpu()