import math

import torch
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig

def GN(c, groups=16):
    # GroupNorm with at most `groups` groups; min() keeps the call valid when
    # c < groups. All channel counts used below (32-256) divide evenly by 16.
    return nn.GroupNorm(min(groups, c), c)

class LightResNetCNN(nn.Module):
    """Six-layer convolutional feature extractor for text-line images.

    Despite the name, there are no residual connections; it is a plain conv
    stack. Three MaxPool2d(2, 2) stages downsample height and width by a
    factor of 8, and a final adaptive pool fixes the output height so the
    transformer's sequence length depends only on image width.
    """

    def __init__(self, in_channels=1, adaptive_height=8):
        super().__init__()
        self.adaptive_height = adaptive_height
        self.layer1 = nn.Sequential(nn.Conv2d(in_channels, 32, 3, 1, 1), GN(32), nn.ReLU(), nn.MaxPool2d(2, 2))
        self.layer2 = nn.Sequential(nn.Conv2d(32, 64, 3, 1, 1), GN(64), nn.ReLU(), nn.MaxPool2d(2, 2))
        self.layer3 = nn.Sequential(nn.Conv2d(64, 128, 3, 1, 1), GN(128), nn.ReLU(), nn.MaxPool2d(2, 2))
        self.layer4 = nn.Sequential(nn.Conv2d(128, 256, 3, 1, 1), GN(256), nn.ReLU())
        self.layer5 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 1), GN(256), nn.ReLU())
        self.layer6 = nn.Sequential(nn.Conv2d(256, 128, 3, 1, 1), GN(128), nn.ReLU())
        # Fix the feature-map height; None leaves the width untouched.
        self.adaptive_pool = nn.AdaptiveAvgPool2d((adaptive_height, None))

    def forward(self, x):
        for i in range(1, 7):
            x = getattr(self, f"layer{i}")(x)
        x = self.adaptive_pool(x)
        return x

class PositionalEncoding(nn.Module):
    """Standard sinusoidal positional encoding (Vaswani et al., 2017), batch-first."""

    def __init__(self, d_model, max_len=2000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2, dtype=torch.float) * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Registered as a buffer so it moves with the module's device/dtype
        # without being a trainable parameter.
        self.register_buffer("pe", pe.unsqueeze(0))

    def forward(self, x):
        # x: (B, T, d_model); add the first T positions.
        return x + self.pe[:, :x.size(1), :]

class PersianOCRConfig(PretrainedConfig):
    model_type = "persianocr"

    def __init__(self, num_classes=100, d_model=1280, nhead=16, num_layers=8, dropout=0.2, adaptive_height=8, **kwargs):
        super().__init__(**kwargs)
        self.num_classes = num_classes
        self.d_model = d_model
        self.nhead = nhead
        self.num_layers = num_layers
        self.dropout = dropout
        self.adaptive_height = adaptive_height

class PersianOCRModel(PreTrainedModel):
    config_class = PersianOCRConfig

    def __init__(self, config):
        super().__init__(config)
        self.cnn = LightResNetCNN(in_channels=1, adaptive_height=config.adaptive_height)
        # The CNN ends with 128 channels at a fixed height, so each time step
        # carries 128 * adaptive_height features before projection to d_model.
        self.proj = nn.Linear(128 * config.adaptive_height, config.d_model)
        self.posenc = PositionalEncoding(config.d_model)
        encoder_layer = nn.TransformerEncoderLayer(config.d_model, config.nhead, batch_first=True, dropout=config.dropout)
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=config.num_layers)
        self.fc = nn.Linear(config.d_model, config.num_classes)
        self.post_init()

    def forward(self, x, labels=None):
        # `labels` is accepted for Trainer-style call signatures, but no loss
        # is computed here; pairing the logits with e.g. CTC is left to the caller.
        f = self.cnn(x)                                 # (B, 128, adaptive_height, W)
        B, C, H, W = f.size()
        f = f.permute(0, 3, 1, 2).reshape(B, W, C * H)  # width becomes the sequence axis
        f = self.posenc(self.proj(f))
        out = self.transformer(f)
        logits = self.fc(out)                           # (B, W, num_classes)
        return {"logits": logits}