gsumbul committed
Commit 4214d91 · verified · 1 Parent(s): 009716e

Hugging Face transformers model files

Files changed (4)
  1. config.json +24 -0
  2. smarties_config.py +42 -0
  3. smarties_model.py +334 -0
  4. spectrum_specs.yaml +140 -0
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "model_type": "SMARTIES-v1-ViT-B",
+   "auto_map": {
+     "AutoModel": "smarties_model.SMARTIESHF",
+     "AutoConfig": "smarties_config.SMARTIESConfig"
+   },
+   "transformers_weights": "smarties-v1-vitb.safetensors",
+   "spectrum_specs": null,
+   "img_size": 224,
+   "patch_size": 16,
+   "embed_dim": 768,
+   "depth": 12,
+   "num_heads": 12,
+   "mlp_ratio": 4.0,
+   "qkv_bias": true,
+   "norm_eps": 1e-6,
+   "global_pool": false,
+   "pos_drop_rate": 0.0,
+   "norm_layer_eps": 1e-6,
+   "mixed_precision": "no",
+   "decoder_embed_dim": 512,
+   "decoder_depth": 8,
+   "decoder_num_heads": 16
+ }
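The `auto_map` block above is what lets `transformers` resolve the custom classes shipped in this commit. A minimal loading sketch (the repo id below is a placeholder, not part of this commit; `trust_remote_code=True` is required so that `smarties_config.py` and `smarties_model.py` are executed):

from transformers import AutoModel

# Placeholder repo id: substitute the actual Hub repo that hosts these files.
model = AutoModel.from_pretrained(
    "<namespace>/<smarties-repo>",
    trust_remote_code=True,  # allow the repo's smarties_*.py code to run
)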
smarties_config.py ADDED
@@ -0,0 +1,42 @@
+ from transformers import PretrainedConfig
+
+ class SMARTIESConfig(PretrainedConfig):
+     model_type = "SMARTIES-v1-ViT-B"
+
+     def __init__(
+         self,
+         img_size=224,
+         patch_size=16,
+         embed_dim=768,
+         depth=12,
+         num_heads=12,
+         mlp_ratio=4.0,
+         qkv_bias=True,
+         norm_eps=1e-6,
+         spectrum_specs=None,
+         global_pool=False,
+         norm_layer_eps=1e-6,
+         mixed_precision='no',
+         decoder_embed_dim=512,
+         decoder_depth=8,
+         decoder_num_heads=16,
+         pos_drop_rate=0.0,
+         **kwargs
+     ):
+         super().__init__(**kwargs)
+         self.img_size = img_size
+         self.patch_size = patch_size
+         self.embed_dim = embed_dim
+         self.depth = depth
+         self.num_heads = num_heads
+         self.mlp_ratio = mlp_ratio
+         self.qkv_bias = qkv_bias
+         self.norm_eps = norm_eps
+         self.spectrum_specs = spectrum_specs
+         self.global_pool = global_pool
+         self.pos_drop_rate = pos_drop_rate
+         self.norm_layer_eps = norm_layer_eps
+         self.mixed_precision = mixed_precision
+         self.decoder_embed_dim = decoder_embed_dim
+         self.decoder_depth = decoder_depth
+         self.decoder_num_heads = decoder_num_heads
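The defaults mirror the ViT-B values in config.json, and `spectrum_specs` stays `None` so the model can fetch spectrum_specs.yaml itself at init. The spectrum table can also be injected by hand, which is the fallback the error message in smarties_model.py suggests. A minimal sketch, assuming spectrum_specs.yaml from this commit sits in the working directory:

import yaml

from smarties_config import SMARTIESConfig

# Load the spectrum table shipped with this commit and inject it into the config;
# all other hyperparameters keep the ViT-B defaults mirrored in config.json.
with open("spectrum_specs.yaml") as f:
    spectrum_specs = yaml.safe_load(f)

config = SMARTIESConfig(spectrum_specs=spectrum_specs)
assert config.embed_dim == 768 and config.depth == 12  # ViT-B defaults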
smarties_model.py ADDED
@@ -0,0 +1,334 @@
+ from functools import partial
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import yaml
+ from timm.models.vision_transformer import Block
+ from transformers import PreTrainedModel
+ from transformers.utils import cached_file
+ from .smarties_config import SMARTIESConfig
+
+
+ class SpectrumRangeProjection(nn.Module):
+     """Patch embedding for a single spectral range (input already patchified)."""
+     def __init__(
+         self,
+         spectral_range,
+         spectrum_spec,
+         patch_size,
+         embed_dim,
+         bias=True
+     ):
+         super().__init__()
+         self.spectral_range = spectral_range
+         self.name = spectrum_spec['name']
+         self.min_wavelength = spectrum_spec['min_wavelength']
+         self.max_wavelength = spectrum_spec['max_wavelength']
+         self.sensors = spectrum_spec['sensors']
+         self.nb_pixels = patch_size**2
+         self.proj = nn.Linear(self.nb_pixels, embed_dim, bias=bias)
+
+     def forward(self, x):
+         return self.proj(x.view(-1, self.nb_pixels))
+
+
+ class SpectrumRangeProjectionAvg(nn.Module):
+     """Patch embedding for a spectral range served by a weighted average of other ranges' projections."""
+     def __init__(
+         self,
+         spectrum_projections,
+         spectrum_spec,
+         embed_dim
+     ):
+         super().__init__()
+         self.min_wavelength = spectrum_spec['min_wavelength']
+         self.max_wavelength = spectrum_spec['max_wavelength']
+         self.central_lambda = 0.5 * (float(self.min_wavelength) + float(self.max_wavelength))
+         self.spectrum_projections = spectrum_projections
+         # Weight each constituent projection by its spectral distance to this range's center, normalized to sum to one.
+         self.weights = []
+         for spectrum_proj in self.spectrum_projections:
+             central_lambda = 0.5 * (float(spectrum_proj.min_wavelength) + float(spectrum_proj.max_wavelength))
+             self.weights.append(abs(self.central_lambda - central_lambda))
+         self.weights = np.array(self.weights) / sum(self.weights)
+         self.embed_dim = embed_dim
+
+     def forward(self, x):
+         out = 0.0
+         for i, spectrum_proj in enumerate(self.spectrum_projections):
+             out += spectrum_proj(x) * self.weights[i]
+         return out
+
+
+ class SpectrumAwareProjection(nn.Module):
+     """One projection per spectral range, indexed by projection_idx."""
+     def __init__(
+         self,
+         spectrum_specs,
+         patch_size,
+         embed_dim,
+         bias=True
+     ):
+         super().__init__()
+         self.nb_pixels = patch_size**2
+
+         # Register the direct projections first (sorted by projection_idx), ...
+         self.spectrum_embeds = torch.nn.ModuleList()
+         for spectral_range in sorted(spectrum_specs, key=lambda key: spectrum_specs[key]['projection_idx']):
+             if (spectrum_specs[spectral_range]['projection_idx'] != -1) and (len(spectrum_specs[spectral_range]['agg_projections']) == 0):
+                 self.spectrum_embeds.append(SpectrumRangeProjection(
+                     spectral_range, spectrum_specs[spectral_range], patch_size, embed_dim
+                 ))
+
+         # ... then the aggregated ones, which reuse the direct projections above.
+         for spectral_range in sorted(spectrum_specs, key=lambda key: spectrum_specs[key]['projection_idx']):
+             if (spectrum_specs[spectral_range]['projection_idx'] != -1) and (len(spectrum_specs[spectral_range]['agg_projections']) > 0):
+                 self.spectrum_embeds.append(
+                     SpectrumRangeProjectionAvg(
+                         [self.spectrum_embeds[agg_proj_idx] for agg_proj_idx in spectrum_specs[spectral_range]['agg_projections']],
+                         spectrum_specs[spectral_range],
+                         embed_dim))
+
+     def forward(self, x, projection_idx):
+         return self.spectrum_embeds[projection_idx](x)
+
+
+ # --------------------------------------------------------
+ # 2D sine-cosine position embedding
+ # References:
+ # Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
+ # MoCo v3: https://github.com/facebookresearch/moco-v3
+ # --------------------------------------------------------
+ def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
+     """
+     grid_size: int of the grid height and width
+     return:
+     pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
+     """
+     grid_h = np.arange(grid_size, dtype=float)
+     grid_w = np.arange(grid_size, dtype=float)
+     grid = np.meshgrid(grid_w, grid_h)  # here w goes first
+     grid = np.stack(grid, axis=0)
+
+     grid = grid.reshape([2, 1, grid_size, grid_size])
+     pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
+     if cls_token:
+         pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
+     return pos_embed
+
+
+ def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+     assert embed_dim % 2 == 0
+
+     # use half of dimensions to encode grid_h
+     emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
+     emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
+
+     emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
+     return emb
+
+
+ def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
+     """
+     embed_dim: output dimension for each position
+     pos: a list of positions to be encoded: size (M,)
+     out: (M, D)
+     """
+     assert embed_dim % 2 == 0
+     omega = np.arange(embed_dim // 2, dtype=float)
+     omega /= embed_dim / 2.0
+     omega = 1.0 / 10000**omega  # (D/2,)
+
+     pos = pos.reshape(-1)  # (M,)
+     out = np.einsum("m,d->md", pos, omega)  # (M, D/2), outer product
+
+     emb_sin = np.sin(out)  # (M, D/2)
+     emb_cos = np.cos(out)  # (M, D/2)
+
+     emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
+     return emb
+
+
+ def get_dtype(mixed_precision):
+     if mixed_precision == 'no':
+         return torch.float32
+     elif mixed_precision == 'bf16':
+         return torch.bfloat16
+     elif mixed_precision == 'fp16':
+         return torch.float16
+     else:
+         raise NotImplementedError
+
+
+ class SMARTIESHF(PreTrainedModel):
+     config_class = SMARTIESConfig
+
+     def __init__(self, config: SMARTIESConfig):
+         super().__init__(config)
+         try:
+             if config.spectrum_specs is None:
+                 spectrum_path = cached_file(
+                     config.name_or_path,
+                     "spectrum_specs.yaml"
+                 )
+                 with open(spectrum_path, "r") as f:
+                     config.spectrum_specs = yaml.safe_load(f)
+         except Exception as e:
+             raise RuntimeError(
+                 "spectrum_specs couldn't be loaded from spectrum_specs.yaml. "
+                 "Please load the YAML file yourself and pass it as the spectrum_specs argument."
+             ) from e
+         self.model_dtype = get_dtype(config.mixed_precision)
+         self.embed_dim = config.embed_dim
+         self.decoder_embed_dim = config.decoder_embed_dim
+         # Map each spectral-range name to its projection index.
+         self.projection_conversion = {i: config.spectrum_specs[i]['projection_idx'] for i in config.spectrum_specs}
+         # Band ordering expected for each supported sensor type.
+         self.sensor_band_specs = {
+             'S2': [
+                 'aerosol',
+                 'blue_1',
+                 'green_2',
+                 'red_2',
+                 'red_edge_1',
+                 'red_edge_2',
+                 'near_infrared_2',
+                 'near_infrared_1',
+                 'near_infrared_3',
+                 'short_wave_infrared_1',
+                 'short_wave_infrared_3',
+                 'short_wave_infrared_4'
+             ],
+             'S1': [
+                 'microwave_1',
+                 'microwave_2'
+             ],
+             'RGB': [
+                 'red_1',
+                 'green_1',
+                 'blue_3'
+             ]
+         }
+         self.sensor_projection_specs = {}
+         for sensor_name in self.sensor_band_specs:
+             self.sensor_projection_specs[sensor_name] = np.array(
+                 [self.projection_conversion[i] for i in self.sensor_band_specs[sensor_name]])
+
+         self.patch_size = config.patch_size
+         self.pos_drop = nn.Dropout(p=config.pos_drop_rate)
+         self.nb_patch_length = int(config.img_size / self.patch_size)
+         self.num_patches = self.nb_patch_length**2
+
+         self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
+         self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + 1, self.embed_dim), requires_grad=False)  # fixed sin-cos embedding
+
+         self.spectrum_projection = SpectrumAwareProjection(
+             spectrum_specs=config.spectrum_specs,
+             patch_size=self.patch_size,
+             embed_dim=self.embed_dim
+         )
+
+         pos_embed = get_2d_sincos_pos_embed(
+             self.pos_embed.shape[-1],
+             self.nb_patch_length,
+             cls_token=True,
+         )
+         self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
+         self.projection_scaler = 12
+         self.norm_layer = partial(nn.LayerNorm, eps=config.norm_layer_eps)
+
+         self.blocks = nn.ModuleList([
+             Block(self.embed_dim, config.num_heads, config.mlp_ratio, qkv_bias=config.qkv_bias, norm_layer=self.norm_layer)
+             for i in range(config.depth)])
+         self.norm = self.norm_layer(self.embed_dim)
+         self.global_pool = config.global_pool
+         if self.global_pool:
+             self.fc_norm = self.norm_layer(self.embed_dim)
+
+         # decoder specifics
+         self.decoder_embed = nn.Linear(self.embed_dim, self.decoder_embed_dim, bias=True)
+         self.mask_token = nn.Parameter(torch.zeros(1, 1, self.decoder_embed_dim))
+         self.decoder_pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + 1, self.decoder_embed_dim), requires_grad=False)  # fixed sin-cos embedding
+
+         self.decoder_blocks = nn.ModuleList([
+             Block(self.decoder_embed_dim, config.decoder_num_heads, config.mlp_ratio, qkv_bias=True, norm_layer=self.norm_layer)
+             for i in range(config.decoder_depth)])
+
+         self.decoder_norm = self.norm_layer(self.decoder_embed_dim)
+         # One reconstruction head per direct (non-aggregated) spectral-range projection.
+         self.decoder_preds = torch.nn.ModuleList()
+         for band_idx in sorted(config.spectrum_specs, key=lambda key: config.spectrum_specs[key]['projection_idx']):
+             if (config.spectrum_specs[band_idx]['projection_idx'] != -1) and (len(config.spectrum_specs[band_idx]['agg_projections']) == 0):
+                 self.decoder_preds.append(nn.Linear(self.decoder_embed_dim, self.patch_size**2, bias=True))
+
+     def tensor_patchify(self, imgs):
+         """
+         imgs: (N, nb_bands, H, W)
+         x: (N, nb_patch_h, nb_patch_w, nb_bands, patch_size, patch_size)
+         """
+         p = self.patch_size
+         assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
+
+         h = w = imgs.shape[2] // p
+         x = imgs.reshape(shape=(imgs.shape[0], imgs.shape[1], h, p, w, p))
+         x = torch.einsum('nchpwq->nhwpqc', x)
+         x = x.reshape(shape=(imgs.shape[0], h, w, p, p, imgs.shape[1])).permute(0, 1, 2, 5, 3, 4)
+         return x
+
+     def forward_encoder(self, imgs, proj_indices, is_patchify, all_tokens):
+         if is_patchify:
+             img_patches = self.tensor_patchify(imgs)
+         else:
+             img_patches = imgs
+         B, nb_patch_h, nb_patch_w, nb_bands, _, _ = img_patches.shape
+         device = img_patches.device
+
+         # Embed every patch of every band with its spectrum-aware projection.
+         img_spectrum_embeds = torch.zeros((B, nb_patch_h, nb_patch_w, nb_bands, self.embed_dim), device=device, dtype=self.model_dtype)
+
+         for projection_idx in torch.unbind(torch.unique(proj_indices)):
+             mask = (proj_indices == projection_idx)
+             img_spectrum_embeds[mask] = self.spectrum_projection(img_patches[mask], projection_idx)
+
+         # Average the band embeddings of each patch, then scale.
+         img_embeddings = self.projection_scaler * img_spectrum_embeds.mean(dim=3)
+         img_embeddings = img_embeddings.reshape(-1, nb_patch_h * nb_patch_w, self.embed_dim)
+
+         cls_tokens = self.cls_token.expand(B, -1, -1)
+         x = torch.cat((cls_tokens, img_embeddings), dim=1)
+         x = x + self.pos_embed
+         x = self.pos_drop(x)
+
+         for blk in self.blocks:
+             x = blk(x)
+
+         if all_tokens:
+             return self.norm(x)  # B, L, embed_dim (L = 1 + num_patches)
+
+         if self.global_pool:
+             x = x[:, 1:, :].mean(dim=1)
+             outcome = self.fc_norm(x)
+         else:
+             x = self.norm(x)
+             outcome = x[:, 0]
+
+         return outcome
+
+     def forward(self, imgs, is_patchify=True, sensor_type='S2', bands=None, proj_indices=None, all_tokens=False):
+         if proj_indices is None:
+             if bands is None:
+                 assert sensor_type in self.sensor_band_specs.keys(), f"Sensor type {sensor_type} not recognized. Available types: {list(self.sensor_band_specs.keys())}. Otherwise, provide bands."
+                 proj_indices = self.sensor_projection_specs[sensor_type]
+             else:
+                 proj_indices = []
+                 for i in bands:
+                     if i in self.projection_conversion.keys():
+                         proj_indices.append(self.projection_conversion[i])
+                 assert len(proj_indices) > 0, \
+                     "No valid bands provided. Please check that the bands align with the spectrum_specs definition " \
+                     "(the default version is available at https://github.com/gsumbul/SMARTIES/blob/main/config/electromagnetic_spectrum.yaml)."
+                 proj_indices = np.array(proj_indices)
+             proj_indices = torch.as_tensor(np.tile(proj_indices.reshape(
+                 1, 1, 1, -1), (imgs.shape[0], self.nb_patch_length, self.nb_patch_length, 1)).astype(np.int32), device=imgs.device)
+
+         return self.forward_encoder(imgs, proj_indices, is_patchify=is_patchify, all_tokens=all_tokens)
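`forward()` picks projection indices from `sensor_type` (or an explicit `bands` list), tiles them over the 14×14 patch grid, and delegates to `forward_encoder`. A minimal sketch of a Sentinel-2 pass on random data with a randomly initialized model (in practice weights come via AutoModel; note also that smarties_model.py uses a package-relative import of smarties_config, so a flat-directory run needs that import adjusted):

import torch
import yaml

from smarties_config import SMARTIESConfig
from smarties_model import SMARTIESHF

# Preload the spectrum table so __init__ skips the cached_file() lookup.
with open("spectrum_specs.yaml") as f:
    config = SMARTIESConfig(spectrum_specs=yaml.safe_load(f))

model = SMARTIESHF(config).eval()

# Sentinel-2 input: 12 bands ordered as in sensor_band_specs['S2'], 224x224 pixels.
imgs = torch.randn(2, 12, 224, 224)

with torch.no_grad():
    cls_features = model(imgs, sensor_type='S2')                     # (2, 768) CLS embedding
    all_embeddings = model(imgs, sensor_type='S2', all_tokens=True)  # (2, 197, 768): 196 patches + CLS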
spectrum_specs.yaml ADDED
@@ -0,0 +1,140 @@
+ aerosol:
+   min_wavelength: 422
+   max_wavelength: 463
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B01 (aerosol)'
+   projection_idx: 0
+   agg_projections: []
+ blue_1:
+   min_wavelength: 427
+   max_wavelength: 558
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B02 (blue)'
+   projection_idx: 1
+   agg_projections: []
+ blue_2:
+   min_wavelength: 452
+   max_wavelength: 512
+   sensors: ['Landsat8-L2']
+   name: 'B2 (blue)'
+   projection_idx: 18
+   agg_projections: [0, 1]
+ blue_3:
+   min_wavelength: 430
+   max_wavelength: 545
+   sensors: ['RGB']
+   name: 'blue'
+   projection_idx: 2
+   agg_projections: []
+ green_1:
+   min_wavelength: 466
+   max_wavelength: 620
+   sensors: ['RGB']
+   name: 'green'
+   projection_idx: 3
+   agg_projections: []
+ green_2:
+   min_wavelength: 524
+   max_wavelength: 595
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B03 (green)'
+   projection_idx: 4
+   agg_projections: []
+ red_1:
+   min_wavelength: 590
+   max_wavelength: 710
+   sensors: ['RGB']
+   name: 'red'
+   projection_idx: 5
+   agg_projections: []
+ red_2:
+   min_wavelength: 634
+   max_wavelength: 696
+   sensors: ['SENTINEL2']
+   name: 'B04 (red)'
+   projection_idx: 6
+   agg_projections: []
+ red_edge_1:
+   min_wavelength: 689
+   max_wavelength: 719
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B05 (red edge 1)'
+   projection_idx: 7
+   agg_projections: []
+ red_edge_2:
+   min_wavelength: 726
+   max_wavelength: 755
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B06 (red edge 2)'
+   projection_idx: 8
+   agg_projections: []
+ near_infrared_1:
+   min_wavelength: 728
+   max_wavelength: 938
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B08 (NIR 1)'
+   projection_idx: 9
+   agg_projections: []
+ near_infrared_2:
+   min_wavelength: 761
+   max_wavelength: 802
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B07 (NIR 2)'
+   projection_idx: 10
+   agg_projections: []
+ near_infrared_3:
+   min_wavelength: 843
+   max_wavelength: 886
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B8A (NIR 3)'
+   projection_idx: 11
+   agg_projections: []
+ short_wave_infrared_1:
+   min_wavelength: 923
+   max_wavelength: 964
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B09 (SWIR water vapour)'
+   projection_idx: 12
+   agg_projections: []
+ short_wave_infrared_2:
+   min_wavelength: 1345
+   max_wavelength: 1406
+   sensors: ['SENTINEL2-L1C']
+   name: 'B10 (SWIR cirrus)'
+   projection_idx: -1
+   agg_projections: []
+ short_wave_infrared_3:
+   min_wavelength: 1516
+   max_wavelength: 1704
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B11 (SWIR 1)'
+   projection_idx: 13
+   agg_projections: []
+ short_wave_infrared_4:
+   min_wavelength: 2002
+   max_wavelength: 2376
+   sensors: ['SENTINEL2-L1C', 'SENTINEL2-L2A']
+   name: 'B12 (SWIR 2)'
+   projection_idx: 14
+   agg_projections: []
+ thermal_infrared_1:
+   min_wavelength: 10600
+   max_wavelength: 11190
+   sensors: ['Landsat8-L2']
+   name: 'B10 (surface temperature)'
+   projection_idx: 17
+   agg_projections: [14, 15]
+ microwave_1:
+   min_wavelength: 5.5e7
+   max_wavelength: 5.6e7
+   sensors: ['SENTINEL1-GRD']
+   name: 'VV'
+   projection_idx: 15
+   agg_projections: []
+ microwave_2:
+   min_wavelength: 5.5e7
+   max_wavelength: 5.6e7
+   sensors: ['SENTINEL1-GRD']
+   name: 'VH'
+   projection_idx: 16
+   agg_projections: []
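Each top-level key defines one spectral range: `projection_idx: -1` means the range is skipped entirely (only `short_wave_infrared_2`, the Sentinel-2 cirrus band), an empty `agg_projections` list means the range gets its own learned linear projection, and a non-empty list (`blue_2`, `thermal_infrared_1`) means the range is served by a distance-weighted average of the listed direct projections. A quick sanity check of that split (a sketch, run next to this file):

import yaml

with open("spectrum_specs.yaml") as f:
    specs = yaml.safe_load(f)

# Ranges with their own nn.Linear projection vs. ranges built by aggregation.
direct = [k for k, v in specs.items() if v['projection_idx'] != -1 and not v['agg_projections']]
aggregated = [k for k, v in specs.items() if v['agg_projections']]
print(len(direct), sorted(aggregated))  # 17 ['blue_2', 'thermal_infrared_1']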