""" PyTorch EvaCLIP model."""

from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    ModelOutput,
    logging,
)
from .configuration_evaclip import EvaCLIPConfig, EvaCLIPTextConfig, EvaCLIPVisionConfig


logger = logging.get_logger(__name__)
					
						
class RMSNorm(nn.Module):
    """
    Adapted from the Transformers T5LayerNorm implementation.
    """

    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct an RMSNorm module in the T5 style: no bias and no subtraction of the mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Compute the mean of squares in float32 for numerical stability, then rescale by the
        # reciprocal root-mean-square. No mean subtraction and no bias are applied.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # Convert back to half precision if the weights are stored in fp16/bf16.
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
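
# Illustrative sketch (not part of the model): RMSNorm rescales by the root-mean-square only;
# unlike nn.LayerNorm, it neither subtracts the mean nor applies a bias. The names below are
# hypothetical and shown purely for intuition:
#
#     norm = RMSNorm(hidden_size=8)
#     x = torch.randn(2, 4, 8)
#     y = norm(x)                                   # same shape as x: (2, 4, 8)
#     rms = x.pow(2).mean(-1, keepdim=True).sqrt()
#     torch.allclose(y, x / rms, atol=1e-5)         # True up to the epsilon term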
					
						
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
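
# Illustrative sketch (not part of the model): the 2-D padding mask is broadcast to the 4-D
# additive form expected by the attention layers, with 0.0 where attention is allowed and the
# dtype's minimum value where it is masked:
#
#     mask = torch.tensor([[1, 1, 0]])              # one sequence, last token padded
#     out = _expand_mask(mask, torch.float32)       # shape (1, 1, 3, 3)
#     # out[..., :2] == 0.0 and out[..., 2] == torch.finfo(torch.float32).min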
					
						
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0
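
# Illustrative sketch (not part of the model): `clip_loss` treats the i-th text and the i-th
# image as the only matching pair, so the cross-entropy targets are simply 0..N-1 along both
# the row and the column direction of the similarity matrix:
#
#     sim = torch.tensor([[ 5.0, -1.0],
#                         [-1.0,  5.0]])            # 2 texts x 2 images, correct pairs on the diagonal
#     loss = clip_loss(sim)                         # small, since the diagonal already dominates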
					
						
@dataclass
class EvaCLIPVisionModelOutput(ModelOutput):
    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class EvaCLIPTextModelOutput(ModelOutput):
    text_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class EvaCLIPOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    logits_per_image: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    image_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
					
						
class EvaCLIPVisionEmbeddings(nn.Module):
    def __init__(self, config: EvaCLIPVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=True,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        batch_size = pixel_values.shape[0]
        patch_embeds = self.patch_embedding(pixel_values)
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings
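
# Illustrative sketch (not part of the model): shape flow through the vision embeddings,
# assuming image_size=224, patch_size=14 and hidden_size=1024 purely for the example:
#
#     pixel_values: (B, 3, 224, 224)
#     patch_embedding        -> (B, 1024, 16, 16)   # 224 / 14 = 16 patches per side
#     flatten + transpose    -> (B, 256, 1024)
#     prepend class token    -> (B, 257, 1024)
#     + position_embedding   -> (B, 257, 1024)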
					
						
class EvaCLIPTextEmbeddings(nn.Module):
    def __init__(self, config: EvaCLIPTextConfig):
        super().__init__()
        embed_dim = config.hidden_size

        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)

        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings
					
						
class EvaCLIPAttention(nn.Module):
    """Multi-headed attention from the 'Attention Is All You Need' paper."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.k_bias)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.v_bias)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.q_bias)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # Project to queries (pre-scaled), keys and values.
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # Apply the causal attention mask first, then the padding mask.
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # Reshape so the returned attention weights keep their gradient, then reuse the
            # flattened view below.
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped
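
# Illustrative sketch (not part of the model): with hidden_size=1024 and 16 heads, the
# attention layer maps (B, T, 1024) -> (B, T, 1024) and, when requested, also returns the
# per-head attention weights of shape (B, 16, T, T). `vision_config` here is a hypothetical
# config object:
#
#     attn = EvaCLIPAttention(vision_config)
#     out, weights = attn(hidden_states, output_attentions=True)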
					
						
class EvaCLIPTextAttention(nn.Module):
    """Multi-headed attention from the 'Attention Is All You Need' paper."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.k_bias)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.v_bias)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.q_bias)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # Project to queries, keys and values (note: unlike EvaCLIPAttention, the queries are
        # not pre-scaled here in the original code).
        query_states = self.q_proj(hidden_states)
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # Apply the causal attention mask first, then the padding mask.
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # Reshape so the returned attention weights keep their gradient, then reuse the
            # flattened view below.
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped
					
						
class EvaCLIPMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
					
						
class EvaCLIPEncoderLayer(nn.Module):
    def __init__(self, config: EvaCLIPConfig):
        super().__init__()
        self.config = config
        norm_layer = RMSNorm if config.use_rms_norm else nn.LayerNorm
        self.embed_dim = config.hidden_size
        self.post_layernorm = config.post_layernorm if config.post_layernorm is not None else False
        self.self_attn = EvaCLIPAttention(config)
        self.layer_norm1 = norm_layer(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = EvaCLIPMLP(config)
        self.layer_norm2 = norm_layer(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        residual = hidden_states

        if not self.post_layernorm:
            hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        if self.post_layernorm:
            hidden_states = self.layer_norm1(hidden_states)
        hidden_states = residual + hidden_states

        residual = hidden_states
        if not self.post_layernorm:
            hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        if self.post_layernorm:
            hidden_states = self.layer_norm2(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
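
# Illustrative sketch (not part of the model): `config.post_layernorm` switches between a
# pre-norm block (normalize, run the sublayer, add the residual) and a post-norm block
# (run the sublayer, normalize, add the residual). For each of the attention and MLP sublayers:
#
#     # pre-norm  (post_layernorm == False):  x = x + sublayer(norm(x))
#     # post-norm (post_layernorm == True):   x = x + norm(sublayer(x))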
					
						
class EvaCLIPPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = EvaCLIPConfig
    base_model_prefix = "clip"
    supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, EvaCLIPTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, EvaCLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, EvaCLIPAttention):
            factor = self.config.initializer_factor
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, EvaCLIPMLP):
            factor = self.config.initializer_factor
            in_proj_std = (
                (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            )
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, EvaCLIPModel):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, EvaCLIPVisionModelWithProjection):
            nn.init.normal_(
                module.visual_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, EvaCLIPTextModelWithProjection):
            nn.init.normal_(
                module.text_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )

        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, EvaCLIPEncoder):
            module.gradient_checkpointing = value
					
						
class EvaCLIPEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is an
    [`EvaCLIPEncoderLayer`].

    Args:
        config: EvaCLIPConfig
    """

    def __init__(self, config: EvaCLIPConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([EvaCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(encoder_layer),
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
					
						
class EvaCLIPTextTransformer(EvaCLIPPreTrainedModel):
    def __init__(self, config: EvaCLIPTextConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size
        norm_layer = RMSNorm if config.use_rms_norm else nn.LayerNorm
        self.embeddings = EvaCLIPTextEmbeddings(config)
        self.encoder = EvaCLIPEncoder(config)
        self.final_layer_norm = norm_layer(embed_dim, eps=config.layer_norm_eps)

    def gradient_checkpointing_enable(self):
        self.encoder.gradient_checkpointing = True

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        bsz, seq_len = input_shape

        # The text model uses causal attention; build the additive causal mask here.
        causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to(
            hidden_states.device
        )

        if attention_mask is not None:
            # expand the padding mask to [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # Pool by taking the hidden state at the end-of-text token, assumed to be the token with
        # the highest id in each sequence (as in the original CLIP text model).
        pooled_output = last_hidden_state[
            torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
            input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
        ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def _build_causal_attention_mask(self, bsz, seq_len, dtype):
        # Build an additive causal mask: positions strictly above the diagonal are filled with
        # the dtype's minimum value, everything else is zero.
        mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
        mask.fill_(torch.tensor(torch.finfo(dtype).min))
        mask.triu_(1)  # zero out the diagonal and everything below it
        mask = mask.unsqueeze(1)  # make the mask broadcastable over attention heads
        return mask
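
# Illustrative sketch (not part of the model): the pooled text feature is the final hidden state
# at the end-of-text position, located via argmax over the token ids. For instance, with
# hypothetical ids where 49407 is the end-of-text token:
#
#     input_ids = torch.tensor([[49406, 320, 1125, 49407, 0, 0]])
#     eot_index = input_ids.argmax(dim=-1)          # tensor([3])
#     # pooled_output = last_hidden_state[torch.arange(1), eot_index]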
					
						
class EvaCLIPTextModel(EvaCLIPPreTrainedModel):
    config_class = EvaCLIPTextConfig

    _no_split_modules = ["EvaCLIPEncoderLayer"]

    def __init__(self, config: EvaCLIPTextConfig):
        super().__init__(config)
        self.text_model = EvaCLIPTextTransformer(config)

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
					
						
class EvaCLIPVisionTransformer(EvaCLIPPreTrainedModel):
    def __init__(self, config: EvaCLIPVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size
        norm_layer = RMSNorm if config.use_rms_norm else nn.LayerNorm
        self.embeddings = EvaCLIPVisionEmbeddings(config)
        self.encoder = EvaCLIPEncoder(config)
        self.post_layernorm = norm_layer(embed_dim, eps=config.layer_norm_eps)

    def gradient_checkpointing_enable(self):
        self.encoder.gradient_checkpointing = True

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
					
						
class EvaCLIPVisionModel(EvaCLIPPreTrainedModel):
    config_class = EvaCLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: EvaCLIPVisionConfig):
        super().__init__(config)

        self.vision_model = EvaCLIPVisionTransformer(config)

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
					
						
class EvaCLIPModel(EvaCLIPPreTrainedModel):
    config_class = EvaCLIPConfig

    def __init__(self, config: EvaCLIPConfig):
        super().__init__(config)

        if not (type(config.text_config).__name__ == "EvaCLIPTextConfig"):
            raise ValueError(
                "config.text_config is expected to be of type EvaCLIPTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not (type(config.vision_config).__name__ == "EvaCLIPVisionConfig"):
            raise ValueError(
                "config.vision_config is expected to be of type EvaCLIPVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = EvaCLIPTextTransformer(text_config)
        self.vision_model = EvaCLIPVisionTransformer(vision_config)

        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        # Fixed (non-learnable) logit scale; the stored value is already the multiplicative factor.
        self.logit_scale = torch.tensor(100.0, requires_grad=False)

        self.post_init()

    def encode_text(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = text_outputs[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    def encode_image(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]
        image_features = self.visual_projection(pooled_output)

        return image_features

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, EvaCLIPOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits; `self.logit_scale` already holds the multiplicative scale
        logit_scale = self.logit_scale
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return EvaCLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
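
# Minimal usage sketch (assumptions: an EVA-CLIP checkpoint exported with this modeling code and
# a matching CLIP-style processor; the checkpoint name below is a placeholder, not a published id):
#
#     from PIL import Image
#     from transformers import AutoModel, AutoProcessor
#
#     model = AutoModel.from_pretrained("<org>/<eva-clip-checkpoint>", trust_remote_code=True)
#     processor = AutoProcessor.from_pretrained("<org>/<eva-clip-checkpoint>")
#
#     image = Image.open("example.jpg")
#     inputs = processor(text=["a cat", "a dog"], images=image, return_tensors="pt", padding=True)
#     outputs = model(**inputs)
#     probs = outputs.logits_per_image.softmax(dim=-1)   # image-to-text matching probabilities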
					
						
class EvaCLIPTextModelWithProjection(EvaCLIPPreTrainedModel):
    config_class = EvaCLIPTextConfig

    _no_split_modules = ["EvaCLIPEncoderLayer"]

    def __init__(self, config: EvaCLIPTextConfig):
        super().__init__(config)

        self.text_model = EvaCLIPTextTransformer(config)

        self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, EvaCLIPTextModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = text_outputs[1]

        text_embeds = self.text_projection(pooled_output)

        if not return_dict:
            outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]
            return tuple(output for output in outputs if output is not None)

        return EvaCLIPTextModelOutput(
            text_embeds=text_embeds,
            last_hidden_state=text_outputs.last_hidden_state,
            hidden_states=text_outputs.hidden_states,
            attentions=text_outputs.attentions,
        )
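
# Minimal usage sketch (assumptions: a compatible checkpoint and tokenizer; the name below is a
# placeholder, not a published model id):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("<org>/<eva-clip-checkpoint>")
#     model = EvaCLIPTextModelWithProjection.from_pretrained("<org>/<eva-clip-checkpoint>")
#     inputs = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
#     text_embeds = model(**inputs).text_embeds          # shape (1, config.projection_dim)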
					
						
class EvaCLIPVisionModelWithProjection(EvaCLIPPreTrainedModel):
    config_class = EvaCLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: EvaCLIPVisionConfig):
        super().__init__(config)

        self.vision_model = EvaCLIPVisionTransformer(config)

        self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, EvaCLIPVisionModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]

        image_embeds = self.visual_projection(pooled_output)

        if not return_dict:
            outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:]
            return tuple(output for output in outputs if output is not None)

        return EvaCLIPVisionModelOutput(
            image_embeds=image_embeds,
            last_hidden_state=vision_outputs.last_hidden_state,
            hidden_states=vision_outputs.hidden_states,
            attentions=vision_outputs.attentions,
        )
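
# Minimal usage sketch (assumptions: a compatible checkpoint and image processor; the name below
# is a placeholder, not a published model id):
#
#     from PIL import Image
#     from transformers import AutoImageProcessor
#
#     processor = AutoImageProcessor.from_pretrained("<org>/<eva-clip-checkpoint>")
#     model = EvaCLIPVisionModelWithProjection.from_pretrained("<org>/<eva-clip-checkpoint>")
#     inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
#     image_embeds = model(**inputs).image_embeds        # shape (1, config.projection_dim)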