liangzhidanta committed
Commit c35145e · verified · 1 Parent(s): 9a33eb3

Update README.md

Files changed (1):
  1. README.md +58 -0
README.md CHANGED
@@ -60,6 +60,64 @@ This demonstrates the fidelity of VQGAN latents for reconstruction and downstrea
 
  ---
 
+ ## Decoding Latents
+
+ You can decode the latent representations back into images using a pre-trained VQGAN.
+ Here is an example workflow:
+
+ ```python
+ import torch
+ from PIL import Image
+ import matplotlib.pyplot as plt
+ from torchvision import transforms
+ from ldm.util import instantiate_from_config
+ from omegaconf import OmegaConf
+
+ # ----------------------------
+ # 1) Initialize VQGAN
+ # ----------------------------
+ class VQGANProcessor:
+     def __init__(self, config_path, ckpt_path, device=None):
+         self.device = device or (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
+         config = OmegaConf.load(config_path)
+         sd = torch.load(ckpt_path, map_location=self.device)['state_dict']
+         model = instantiate_from_config(config.model)
+         # strict=False tolerates missing/unexpected keys in the checkpoint
+         model.load_state_dict(sd, strict=False)
+         model.eval().to(self.device)
+         self.first_stage = model.first_stage_model
+
+     def decode(self, latent: torch.Tensor) -> torch.Tensor:
+         with torch.no_grad():
+             rec = self.first_stage.decode(latent.to(self.device))
+             # Map the decoder output from [-1, 1] to [0, 1] for display
+             rec = torch.clamp((rec + 1.) / 2., 0, 1)
+         return rec
+
+ # ----------------------------
+ # 2) Load a latent sample
+ # ----------------------------
+ latent_path = "latent_imagenet/class_name/sample.pt"
+ latent = torch.load(latent_path).unsqueeze(0)  # [1, 3, 64, 64]
+
+ # ----------------------------
+ # 3) Decode the latent
+ # ----------------------------
+ processor = VQGANProcessor("configs/latent-diffusion/cin256-v2.yaml",
+                            "models/ldm/cin256-v2/model.ckpt")
+ recon = processor.decode(latent).squeeze(0).permute(1, 2, 0).cpu().numpy()  # [H, W, 3] in [0, 1]
+
+ # ----------------------------
+ # 4) (Optional) Compare with original image
+ # ----------------------------
+ orig_path = "process_data/imagenet/val/class_name/sample.JPEG"
+ orig = Image.open(orig_path).convert("RGB")
+ orig_resized = orig.resize((256, 256))
+
+ fig, axes = plt.subplots(1, 3, figsize=(18, 6))
+ axes[0].imshow(orig); axes[0].set_title("Original"); axes[0].axis("off")
+ axes[1].imshow(orig_resized); axes[1].set_title("Resized 256×256"); axes[1].axis("off")
+ axes[2].imshow(recon); axes[2].set_title("Reconstruction"); axes[2].axis("off")
+ plt.show()
+ ```
+
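+ As a minimal follow-up sketch, assuming `recon` is the [H, W, 3] float array in [0, 1] produced
+ in step 3, you can also write the decoded image to disk (the output filename here is arbitrary):
+
+ ```python
+ import numpy as np
+ from PIL import Image
+
+ # Convert the [0, 1] float reconstruction to 8-bit RGB and save it.
+ Image.fromarray((recon * 255.0).round().astype(np.uint8)).save("reconstruction.png")
+ ```
+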
  ## Example Usage
 
  ```python