Dataset columns, with dtype and observed range (string columns report character-length ranges; `class_skeleton` may be null):

| Column | Type | Observed range |
|---|---|---|
| `id` | int64 | 0 – 328k |
| `repository_name` | string | 7 – 58 chars |
| `file_path` | string | 9 – 302 chars |
| `class_name` | string | 5 – 256 chars |
| `human_written_code` | string | 16 – 2.16M chars |
| `class_skeleton` | string (nullable) | 18 – 1.49M chars |
| `total_program_units` | int64 | 1 – 1.76k |
| `total_doc_str` | int64 | 0 – 771 |
| `AvgCountLine` | float64 | 0 – 7.89k |
| `AvgCountLineBlank` | float64 | 0 – 297 |
| `AvgCountLineCode` | float64 | 0 – 7.89k |
| `AvgCountLineComment` | float64 | 0 – 7.89k |
| `AvgCyclomatic` | float64 | 0 – 130 |
| `CommentToCodeRatio` | float64 | 0 – 168 |
| `CountClassBase` | float64 | 0 – 40 |
| `CountClassCoupled` | float64 | 0 – 583 |
| `CountClassCoupledModified` | float64 | 0 – 575 |
| `CountClassDerived` | float64 | 0 – 5.35k |
| `CountDeclInstanceMethod` | float64 | 0 – 529 |
| `CountDeclInstanceVariable` | float64 | 0 – 296 |
| `CountDeclMethod` | float64 | 0 – 599 |
| `CountDeclMethodAll` | float64 | 0 – 1.12k |
| `CountLine` | float64 | 1 – 40.4k |
| `CountLineBlank` | float64 | 0 – 8.16k |
| `CountLineCode` | float64 | 1 – 25.7k |
| `CountLineCodeDecl` | float64 | 1 – 8.15k |
| `CountLineCodeExe` | float64 | 0 – 24.2k |
| `CountLineComment` | float64 | 0 – 16.5k |
| `CountStmt` | float64 | 1 – 9.71k |
| `CountStmtDecl` | float64 | 1 – 8.15k |
| `CountStmtExe` | float64 | 0 – 9.69k |
| `MaxCyclomatic` | float64 | 0 – 759 |
| `MaxInheritanceTree` | float64 | 0 – 16 |
| `MaxNesting` | float64 | 0 – 34 |
| `SumCyclomatic` | float64 | 0 – 2.9k |
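For orientation, a minimal sketch of pulling rows like the ones below with the `datasets` library. The dataset id `org/class-metrics-corpus` is a placeholder, not the real Hub repository name:

```python
from datasets import load_dataset

# Placeholder id -- substitute the dataset's actual Hub repository name.
ds = load_dataset("org/class-metrics-corpus", split="train")

row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["human_written_code"][:120])      # first characters of the class source
print(row["CountLine"], row["SumCyclomatic"])  # static metrics travel with each row
```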
Sample rows:

| id | repository_name | file_path | class_name |
|---|---|---|---|
| 0 | `huggingface/pytorch-pretrained-BERT` | `huggingface_pytorch-pretrained-BERT/benchmark/benchmarks_entrypoint.py` | `benchmark.benchmarks_entrypoint.ImportModuleException` |

human_written_code:

```python
class ImportModuleException(Exception):
    pass
```

class_skeleton:

```python
class ImportModuleException(Exception):
    pass
```

Code metrics:

| total_program_units | total_doc_str | AvgCountLine | AvgCountLineBlank | AvgCountLineCode | AvgCountLineComment | AvgCyclomatic | CommentToCodeRatio | CountClassBase | CountClassCoupled | CountClassCoupledModified | CountClassDerived | CountDeclInstanceMethod | CountDeclInstanceVariable | CountDeclMethod | CountDeclMethodAll | CountLine | CountLineBlank | CountLineCode | CountLineCodeDecl | CountLineCodeExe | CountLineComment | CountStmt | CountStmtDecl | CountStmtExe | MaxCyclomatic | MaxInheritanceTree | MaxNesting | SumCyclomatic |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
| id | repository_name | file_path | class_name |
|---|---|---|---|
| 1 | `huggingface/pytorch-pretrained-BERT` | `huggingface_pytorch-pretrained-BERT/benchmark/benchmarks_entrypoint.py` | `benchmark.benchmarks_entrypoint.MetricsRecorder` |

human_written_code:

```python
import pandas as pd
import os
from datetime import datetime
import uuid
import logging
import json


class MetricsRecorder:
    def __init__(self, connection, logger: logging.Logger, repository: str, branch: str, commit_id: str, commit_msg: str, collect_csv_data: bool=True):
        self.conn = connection
        self.use_database = connection is not None
        if self.use_database:
            self.conn.autocommit = True
        self.logger = logger
        self.repository = repository
        self.branch = branch
        self.commit_id = commit_id
        self.commit_msg = commit_msg
        self.collect_csv_data = collect_csv_data
        if self.collect_csv_data:
            self.benchmarks_df = pd.DataFrame(columns=['benchmark_id', 'repository', 'branch', 'commit_id', 'commit_message', 'metadata', 'created_at'])
            self.device_measurements_df = pd.DataFrame(columns=['benchmark_id', 'cpu_util', 'mem_megabytes', 'gpu_util', 'gpu_mem_megabytes', 'time'])
            self.model_measurements_df = pd.DataFrame(columns=['benchmark_id', 'time', 'model_load_time', 'first_eager_forward_pass_time_secs', 'second_eager_forward_pass_time_secs', 'first_eager_generate_time_secs', 'second_eager_generate_time_secs', 'time_to_first_token_secs', 'time_to_second_token_secs', 'time_to_third_token_secs', 'time_to_next_token_mean_secs', 'first_compile_generate_time_secs', 'second_compile_generate_time_secs', 'third_compile_generate_time_secs', 'fourth_compile_generate_time_secs'])
        else:
            self.benchmarks_df = None
            self.device_measurements_df = None
            self.model_measurements_df = None

    def initialise_benchmark(self, metadata: dict[str, str]) -> str:
        """
        Creates a new benchmark, returns the benchmark id (UUID)
        """
        benchmark_id = str(uuid.uuid4())
        if self.use_database:
            with self.conn.cursor() as cur:
                cur.execute('INSERT INTO benchmarks (benchmark_id, repository, branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s, %s, %s)', (benchmark_id, self.repository, self.branch, self.commit_id, self.commit_msg, metadata))
            self.logger.debug(f'initialised benchmark #{benchmark_id}')
        if self.collect_csv_data:
            new_row = pd.DataFrame([{'benchmark_id': benchmark_id, 'repository': self.repository, 'branch': self.branch, 'commit_id': self.commit_id, 'commit_message': self.commit_msg, 'metadata': json.dumps(metadata), 'created_at': datetime.utcnow().isoformat()}])
            self.benchmarks_df = pd.concat([self.benchmarks_df, new_row], ignore_index=True)
        mode_info = []
        if self.use_database:
            mode_info.append('database')
        if self.collect_csv_data:
            mode_info.append('CSV')
        mode_str = ' + '.join(mode_info) if mode_info else 'no storage'
        self.logger.debug(f'initialised benchmark #{benchmark_id} ({mode_str} mode)')
        return benchmark_id

    def collect_device_measurements(self, benchmark_id: str, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes):
        """
        Collect device metrics, such as CPU & GPU usage. These are "static", as in you cannot pass arbitrary arguments to the function.
        """
        if self.collect_csv_data:
            new_row = pd.DataFrame([{'benchmark_id': benchmark_id, 'cpu_util': cpu_util, 'mem_megabytes': mem_megabytes, 'gpu_util': gpu_util, 'gpu_mem_megabytes': gpu_mem_megabytes, 'time': datetime.utcnow().isoformat()}])
            self.device_measurements_df = pd.concat([self.device_measurements_df, new_row], ignore_index=True)
        if self.use_database:
            with self.conn.cursor() as cur:
                cur.execute('INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)', (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes))
        self.logger.debug(f'collected device measurements for benchmark #{benchmark_id} [CPU util: {cpu_util}, mem MBs: {mem_megabytes}, GPU util: {gpu_util}, GPU mem MBs: {gpu_mem_megabytes}]')

    def collect_model_measurements(self, benchmark_id: str, measurements: dict[str, float]):
        if self.collect_csv_data:
            row_data = {'benchmark_id': benchmark_id, 'time': datetime.utcnow().isoformat()}
            row_data.update(measurements)
            new_row = pd.DataFrame([row_data])
            self.model_measurements_df = pd.concat([self.model_measurements_df, new_row], ignore_index=True)
        if self.use_database:
            with self.conn.cursor() as cur:
                cur.execute(
                    """
                    INSERT INTO model_measurements (
                        benchmark_id,
                        measurements
                    ) VALUES (%s, %s)
                    """,
                    (benchmark_id, measurements),
                )
        self.logger.debug(f'collected model measurements for benchmark #{benchmark_id}: {measurements}')

    def export_to_csv(self, output_dir: str='benchmark_results'):
        """
        Export all collected data to CSV files using pandas DataFrames
        """
        if not self.collect_csv_data:
            self.logger.warning('CSV data collection is disabled - no CSV files will be generated')
            return
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
            self.logger.info(f'Created output directory: {output_dir}')
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        files_created = []
        self._export_pandas_data(output_dir, timestamp, files_created)
        self.logger.info(f'CSV export complete! Created {len(files_created)} files in {output_dir}')

    def _export_pandas_data(self, output_dir: str, timestamp: str, files_created: list):
        """
        Export CSV files using pandas DataFrames
        """
        benchmarks_file = os.path.join(output_dir, f'benchmarks_{timestamp}.csv')
        self.benchmarks_df.to_csv(benchmarks_file, index=False)
        files_created.append(benchmarks_file)
        self.logger.info(f'Exported {len(self.benchmarks_df)} benchmark records to {benchmarks_file}')
        device_file = os.path.join(output_dir, f'device_measurements_{timestamp}.csv')
        self.device_measurements_df.to_csv(device_file, index=False)
        files_created.append(device_file)
        self.logger.info(f'Exported {len(self.device_measurements_df)} device measurement records to {device_file}')
        model_file = os.path.join(output_dir, f'model_measurements_{timestamp}.csv')
        self.model_measurements_df.to_csv(model_file, index=False)
        files_created.append(model_file)
        self.logger.info(f'Exported {len(self.model_measurements_df)} model measurement records to {model_file}')
        summary_file = os.path.join(output_dir, f'benchmark_summary_{timestamp}.csv')
        self._create_summary(summary_file)
        files_created.append(summary_file)

    def _create_summary(self, summary_file: str):
        """
        Create a comprehensive summary CSV using pandas operations
        """
        if len(self.benchmarks_df) == 0:
            summary_df = pd.DataFrame()
            summary_df.to_csv(summary_file, index=False)
            self.logger.info(f'Created empty benchmark summary at {summary_file}')
            return
        summary_df = self.benchmarks_df.copy()
        if len(self.model_measurements_df) > 0:
            model_df = self.model_measurements_df.drop(columns=['time'], errors='ignore')
            summary_df = summary_df.merge(model_df, on='benchmark_id', how='left')
        if len(self.device_measurements_df) > 0:
            device_agg = self.device_measurements_df.groupby('benchmark_id').agg({'cpu_util': ['mean', 'max', 'std', 'count'], 'mem_megabytes': ['mean', 'max', 'std'], 'gpu_util': ['mean', 'max', 'std'], 'gpu_mem_megabytes': ['mean', 'max', 'std']}).round(3)
            device_agg.columns = [f'{col[0]}_{col[1]}' for col in device_agg.columns]
            device_agg = device_agg.reset_index()
            if 'cpu_util_count' in device_agg.columns:
                device_agg = device_agg.rename(columns={'cpu_util_count': 'device_measurement_count'})
            summary_df = summary_df.merge(device_agg, on='benchmark_id', how='left')
        summary_df.to_csv(summary_file, index=False)
        self.logger.info(f'Created comprehensive benchmark summary with {len(summary_df)} records at {summary_file}')

    def close(self):
        if self.use_database and self.conn:
            self.conn.close()
```

class_skeleton:

```python
class MetricsRecorder:
    def __init__(self, connection, logger: logging.Logger, repository: str, branch: str, commit_id: str, commit_msg: str, collect_csv_data: bool=True):
        pass

    def initialise_benchmark(self, metadata: dict[str, str]) -> str:
        '''
        Creates a new benchmark, returns the benchmark id (UUID)
        '''
        pass

    def collect_device_measurements(self, benchmark_id: str, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes):
        '''
        Collect device metrics, such as CPU & GPU usage. These are "static", as in you cannot pass arbitrary arguments to the function.
        '''
        pass

    def collect_model_measurements(self, benchmark_id: str, measurements: dict[str, float]):
        pass

    def export_to_csv(self, output_dir: str='benchmark_results'):
        '''
        Export all collected data to CSV files using pandas DataFrames
        '''
        pass

    def _export_pandas_data(self, output_dir: str, timestamp: str, files_created: list):
        '''
        Export CSV files using pandas DataFrames
        '''
        pass

    def _create_summary(self, summary_file: str):
        '''
        Create a comprehensive summary CSV using pandas operations
        '''
        pass

    def close(self):
        pass
```

Code metrics:

| total_program_units | total_doc_str | AvgCountLine | AvgCountLineBlank | AvgCountLineCode | AvgCountLineComment | AvgCyclomatic | CommentToCodeRatio | CountClassBase | CountClassCoupled | CountClassCoupledModified | CountClassDerived | CountDeclInstanceMethod | CountDeclInstanceVariable | CountDeclMethod | CountDeclMethodAll | CountLine | CountLineBlank | CountLineCode | CountLineCodeDecl | CountLineCodeExe | CountLineComment | CountStmt | CountStmtDecl | CountStmtExe | MaxCyclomatic | MaxInheritanceTree | MaxNesting | SumCyclomatic |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 9 | 5 | 10 | 0 | 8 | 2 | 1 | 0.23 | 0 | 4 | 0 | 0 | 5 | 5 | 5 | 5 | 54 | 4 | 43 | 15 | 37 | 10 | 24 | 12 | 18 | 1 | 0 | 1 | 5 |
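For context, a minimal sketch of driving the `MetricsRecorder` above in CSV-only mode (passing `connection=None` skips every PostgreSQL branch); the repository name and metric values here are made up for illustration:

```python
import logging

logger = logging.getLogger("benchmarks")
recorder = MetricsRecorder(
    connection=None,                 # no database: use_database stays False
    logger=logger,
    repository="huggingface/transformers",
    branch="main",
    commit_id="abc123",
    commit_msg="benchmark run",
    collect_csv_data=True,
)

bid = recorder.initialise_benchmark({"gpu": "A100"})
recorder.collect_device_measurements(bid, cpu_util=42.0, mem_megabytes=2048, gpu_util=90.0, gpu_mem_megabytes=16384)
recorder.collect_model_measurements(bid, {"model_load_time": 3.2})
recorder.export_to_csv("benchmark_results")  # writes benchmarks_*, device_*, model_*, and summary_* CSVs
recorder.close()
```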
| id | repository_name | file_path | class_name |
|---|---|---|---|
| 2 | `huggingface/pytorch-pretrained-BERT` | `huggingface_pytorch-pretrained-BERT/examples/modular-transformers/configuration_my_new_model.py` | `configuration_my_new_model.MyNewModelConfig` |

human_written_code:

````python
from ...modeling_rope_utils import rope_config_validation
from ...configuration_utils import PretrainedConfig


class MyNewModelConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`MyNewModelModel`]. It is used to instantiate an MyNewModel
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the MyNewModel-7B.
    e.g. [meta-my_new_model/MyNewModel-2-7b-hf](https://huggingface.co/meta-my_new_model/MyNewModel-2-7b-hf)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the MyNewModel model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MyNewModelModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. MyNewModel 1 supports up to 2048 tokens,
            MyNewModel 2 up to 4096, CodeLlama up to 16384.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
            understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
            results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
            and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
            accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'my_new_model3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'my_new_model3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'my_new_model3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'my_new_model3'. Scaling factor applied to high frequency components of the RoPE
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
        head_dim (`int`, *optional*):
            The attention head dimension. If None, it will default to hidden_size // num_attention_heads

    ```python
    >>> from transformers import MyNewModelModel, MyNewModelConfig
    >>> # Initializing a MyNewModel my_new_model-7b style configuration
    >>> configuration = MyNewModelConfig()
    >>> # Initializing a model from the my_new_model-7b style configuration
    >>> model = MyNewModelModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """
    model_type = 'my_new_model'
    keys_to_ignore_at_inference = ['past_key_values']
    base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}
    base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=True, head_dim=None, new_param=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
        if self.rope_scaling is not None and 'type' in self.rope_scaling:
            self.rope_scaling['rope_type'] = self.rope_scaling['type']
        rope_config_validation(self)
        self.new_param = new_param
````

class_skeleton:

````python
class MyNewModelConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`MyNewModelModel`]. It is used to instantiate an MyNewModel
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the MyNewModel-7B.
    e.g. [meta-my_new_model/MyNewModel-2-7b-hf](https://huggingface.co/meta-my_new_model/MyNewModel-2-7b-hf)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the MyNewModel model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MyNewModelModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. MyNewModel 1 supports up to 2048 tokens,
            MyNewModel 2 up to 4096, CodeLlama up to 16384.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
            understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
            results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
            and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
            accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'my_new_model3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'my_new_model3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'my_new_model3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'my_new_model3'. Scaling factor applied to high frequency components of the RoPE
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
        head_dim (`int`, *optional*):
            The attention head dimension. If None, it will default to hidden_size // num_attention_heads

    ```python
    >>> from transformers import MyNewModelModel, MyNewModelConfig
    >>> # Initializing a MyNewModel my_new_model-7b style configuration
    >>> configuration = MyNewModelConfig()
    >>> # Initializing a model from the my_new_model-7b style configuration
    >>> model = MyNewModelModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    '''
    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=True, head_dim=None, new_param=0, **kwargs):
        pass
````

Code metrics:

| total_program_units | total_doc_str | AvgCountLine | AvgCountLineBlank | AvgCountLineCode | AvgCountLineComment | AvgCyclomatic | CommentToCodeRatio | CountClassBase | CountClassCoupled | CountClassCoupledModified | CountClassDerived | CountDeclInstanceMethod | CountDeclInstanceVariable | CountDeclMethod | CountDeclMethodAll | CountLine | CountLineBlank | CountLineCode | CountLineCodeDecl | CountLineCodeExe | CountLineComment | CountStmt | CountStmtDecl | CountStmtExe | MaxCyclomatic | MaxInheritanceTree | MaxNesting | SumCyclomatic |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2 | 1 | 63 | 2 | 58 | 3 | 4 | 1.63 | 1 | 1 | 0 | 0 | 1 | 19 | 1 | 1 | 195 | 11 | 70 | 50 | 42 | 114 | 30 | 24 | 28 | 4 | 1 | 1 | 4 |
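One detail worth noting in the `__init__` above: a legacy `rope_scaling['type']` key is copied to `rope_type` before `rope_config_validation` runs. A small sketch of what that back-compat shim accepts, assuming the generated configuration class is importable; the scaling values are hypothetical:

```python
# Legacy-style dict: 'type' is rewritten to 'rope_type' before validation.
config = MyNewModelConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling["rope_type"] == "linear"
```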
| id | repository_name | file_path | class_name |
|---|---|---|---|
| 3 | `huggingface/pytorch-pretrained-BERT` | `huggingface_pytorch-pretrained-BERT/examples/modular-transformers/configuration_my_new_model2.py` | `configuration_my_new_model2.MyNewModel2Config` |

human_written_code:

````python
from ...modeling_rope_utils import rope_config_validation
from ...configuration_utils import PretrainedConfig


class MyNewModel2Config(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate an Gemma
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Gemma-7B.
    e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GemmaModel`]

    ```python
    >>> from transformers import GemmaModel, GemmaConfig
    >>> # Initializing a Gemma gemma-7b style configuration
    >>> configuration = GemmaConfig()
    >>> # Initializing a model from the gemma-7b style configuration
    >>> model = GemmaModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'my_new_model2'
    keys_to_ignore_at_inference = ['past_key_values']
    base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}
    base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=False, head_dim=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
        if self.rope_scaling is not None and 'type' in self.rope_scaling:
            self.rope_scaling['rope_type'] = self.rope_scaling['type']
        rope_config_validation(self)
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
````

class_skeleton:

````python
class MyNewModel2Config(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate an Gemma
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Gemma-7B.
    e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`GemmaModel`]

    ```python
    >>> from transformers import GemmaModel, GemmaConfig
    >>> # Initializing a Gemma gemma-7b style configuration
    >>> configuration = GemmaConfig()
    >>> # Initializing a model from the gemma-7b style configuration
    >>> model = GemmaModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''
    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=False, head_dim=None, **kwargs):
        pass
````

Code metrics:

| total_program_units | total_doc_str | AvgCountLine | AvgCountLineBlank | AvgCountLineCode | AvgCountLineComment | AvgCyclomatic | CommentToCodeRatio | CountClassBase | CountClassCoupled | CountClassCoupledModified | CountClassDerived | CountDeclInstanceMethod | CountDeclInstanceVariable | CountDeclMethod | CountDeclMethodAll | CountLine | CountLineBlank | CountLineCode | CountLineCodeDecl | CountLineCodeExe | CountLineComment | CountStmt | CountStmtDecl | CountStmtExe | MaxCyclomatic | MaxInheritanceTree | MaxNesting | SumCyclomatic |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2 | 1 | 62 | 3 | 56 | 3 | 4 | 0.35 | 1 | 1 | 0 | 0 | 1 | 18 | 1 | 1 | 97 | 5 | 68 | 48 | 41 | 24 | 29 | 23 | 27 | 4 | 1 | 1 | 4 |
| id | repository_name | file_path | class_name |
|---|---|---|---|
| 4 | `huggingface/pytorch-pretrained-BERT` | `huggingface_pytorch-pretrained-BERT/examples/modular-transformers/configuration_new_model.py` | `configuration_new_model.NewModelConfig` |

human_written_code:

````python
from ...configuration_utils import PretrainedConfig


class NewModelConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`NewModelModel`]. It is used to instantiate an NewModel
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the NewModel-7B.
    e.g. [google/new_model-7b](https://huggingface.co/google/new_model-7b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the NewModel model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`NewModelModel`]
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 24576):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 16):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The legacy activation function. It is overwritten by the `hidden_activation`.
        hidden_activation (`str` or `function`, *optional*):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import NewModelModel, NewModelConfig
    >>> # Initializing a NewModel new_model-7b style configuration
    >>> configuration = NewModelConfig()
    >>> # Initializing a model from the new_model-7b style configuration
    >>> model = NewModelModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'new_model'
    keys_to_ignore_at_inference = ['past_key_values']
    base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}
    base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}

    def __init__(self, vocab_size=256030, hidden_size=64, intermediate_size=90, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act='gelu_pytorch_tanh', hidden_activation=None, max_position_embeddings=1500, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.hidden_activation = hidden_activation
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

    @property
    def num_heads(self):
        return self.num_attention_heads
````

class_skeleton:

````python
class NewModelConfig(PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`NewModelModel`]. It is used to instantiate an NewModel
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the NewModel-7B.
    e.g. [google/new_model-7b](https://huggingface.co/google/new_model-7b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the NewModel model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`NewModelModel`]
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 24576):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 16):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The legacy activation function. It is overwritten by the `hidden_activation`.
        hidden_activation (`str` or `function`, *optional*):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import NewModelModel, NewModelConfig
    >>> # Initializing a NewModel new_model-7b style configuration
    >>> configuration = NewModelConfig()
    >>> # Initializing a model from the new_model-7b style configuration
    >>> model = NewModelModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''
    def __init__(self, vocab_size=256030, hidden_size=64, intermediate_size=90, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act='gelu_pytorch_tanh', hidden_activation=None, max_position_embeddings=1500, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, **kwargs):
        pass

    @property
    def num_heads(self):
        pass
````

Code metrics:

| total_program_units | total_doc_str | AvgCountLine | AvgCountLineBlank | AvgCountLineCode | AvgCountLineComment | AvgCyclomatic | CommentToCodeRatio | CountClassBase | CountClassCoupled | CountClassCoupledModified | CountClassDerived | CountDeclInstanceMethod | CountDeclInstanceVariable | CountDeclMethod | CountDeclMethodAll | CountLine | CountLineBlank | CountLineCode | CountLineCodeDecl | CountLineCodeExe | CountLineComment | CountStmt | CountStmtDecl | CountStmtExe | MaxCyclomatic | MaxInheritanceTree | MaxNesting | SumCyclomatic |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 4 | 1 | 25 | 0 | 25 | 0 | 1 | 1.25 | 1 | 1 | 0 | 0 | 2 | 16 | 2 | 2 | 122 | 3 | 53 | 45 | 26 | 66 | 23 | 21 | 20 | 1 | 1 | 0 | 2 |
| id | repository_name | file_path | class_name |
|---|---|---|---|
| 5 | `huggingface/pytorch-pretrained-BERT` | `huggingface_pytorch-pretrained-BERT/conftest.py` | `conftest.CustomOutputChecker` |

human_written_code:

```python
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
```

class_skeleton:

```python
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        pass
```

Code metrics:

| total_program_units | total_doc_str | AvgCountLine | AvgCountLineBlank | AvgCountLineCode | AvgCountLineComment | AvgCyclomatic | CommentToCodeRatio | CountClassBase | CountClassCoupled | CountClassCoupledModified | CountClassDerived | CountDeclInstanceMethod | CountDeclInstanceVariable | CountDeclMethod | CountDeclMethodAll | CountLine | CountLineBlank | CountLineCode | CountLineCodeDecl | CountLineCodeExe | CountLineComment | CountStmt | CountStmtDecl | CountStmtExe | MaxCyclomatic | MaxInheritanceTree | MaxNesting | SumCyclomatic |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 5 | 0 | 5 | 2 | 3 | 0 | 5 | 2 | 3 | 2 | 1 | 1 | 2 |
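The snippet above references `OutputChecker` and an `IGNORE_RESULT` flag that the extracted class does not define. A minimal sketch of the usual wiring, assuming the standard `doctest` idiom; the registration and monkeypatch lines are our assumption, not part of the row:

```python
import doctest
from doctest import OutputChecker

# Assumed wiring: register a custom doctest option flag, then swap in the checker.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # skip comparison for examples marked "# doctest: +IGNORE_RESULT"
        return OutputChecker.check_output(self, want, got, optionflags)

doctest.OutputChecker = CustomOutputChecker
```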
| id | repository_name | file_path | class_name |
|---|---|---|---|
| 6 | `huggingface/pytorch-pretrained-BERT` | `huggingface_pytorch-pretrained-BERT/.circleci/create_circleci_config.py` | `create_circleci_config.CircleCIJob` |

human_written_code:

```python
import copy
from typing import Any, Optional
from dataclasses import dataclass
import os


@dataclass
class CircleCIJob:
    name: str
    additional_env: dict[str, Any] = None
    docker_image: list[dict[str, str]] = None
    install_steps: list[str] = None
    marker: Optional[str] = None
    parallelism: Optional[int] = 0
    pytest_num_workers: int = 8
    pytest_options: dict[str, Any] = None
    resource_class: Optional[str] = 'xlarge'
    tests_to_run: Optional[list[str]] = None
    num_test_files_per_worker: Optional[int] = 10
    command_timeout: Optional[int] = None

    def __post_init__(self):
        if self.additional_env is None:
            self.additional_env = {}
        if self.docker_image is None:
            self.docker_image = copy.deepcopy(DEFAULT_DOCKER_IMAGE)
        else:
            print(os.environ.get('GIT_COMMIT_MESSAGE'))
            if '[build-ci-image]' in os.environ.get('GIT_COMMIT_MESSAGE', '') or os.environ.get('GIT_COMMIT_MESSAGE', '') == 'dev-ci':
                self.docker_image[0]['image'] = f"{self.docker_image[0]['image']}:dev"
            print(f'Using {self.docker_image} docker image')
        if self.install_steps is None:
            self.install_steps = ['uv pip install .']
        self.install_steps.append('uv pip install git+https://github.com/ydshieh/[email protected]')
        if self.pytest_options is None:
            self.pytest_options = {}
        if isinstance(self.tests_to_run, str):
            self.tests_to_run = [self.tests_to_run]
        else:
            test_file = os.path.join('test_preparation', f'{self.job_name}_test_list.txt')
            print('Looking for ', test_file)
            if os.path.exists(test_file):
                with open(test_file) as f:
                    expanded_tests = f.read().strip().split('\n')
                self.tests_to_run = expanded_tests
                print('Found:', expanded_tests)
            else:
                self.tests_to_run = []
                print('not Found')

    def to_dict(self):
        env = COMMON_ENV_VARIABLES.copy()
        env['RUN_FLAKY'] = os.environ.get('CIRCLE_PULL_REQUEST', '') == ''
        env.update(self.additional_env)
        job = {'docker': self.docker_image, 'environment': env}
        if self.resource_class is not None:
            job['resource_class'] = self.resource_class
        all_options = {**COMMON_PYTEST_OPTIONS, **self.pytest_options}
        pytest_flags = [f'--{key}={value}' if value is not None or key in ['doctest-modules'] else f'-{key}' for key, value in all_options.items()]
        pytest_flags.append(f'--make-reports={self.name}' if 'examples' in self.name else f'--make-reports=tests_{self.name}')
        timeout_cmd = f'timeout {self.command_timeout} ' if self.command_timeout else ''
        marker_cmd = f"-m '{self.marker}'" if self.marker is not None else ''
        junit_flags = ' -p no:warning -o junit_family=xunit1 --junitxml=test-results/junit.xml'
        joined_flaky_patterns = '|'.join(FLAKY_TEST_FAILURE_PATTERNS)
        repeat_on_failure_flags = f"--reruns 5 --reruns-delay 2 --only-rerun '({joined_flaky_patterns})'"
        parallel = f' << pipeline.parameters.{self.job_name}_parallelism >> '
        steps = [
            'checkout',
            {'attach_workspace': {'at': 'test_preparation'}},
            {'run': 'apt-get update && apt-get install -y curl'},
            {'run': ' && '.join(self.install_steps)},
            {'run': {'name': 'Download NLTK files', 'command': 'python -c "import nltk; nltk.download(\'punkt\', quiet=True)" '} if 'example' in self.name else 'echo Skipping'},
            {'run': {'name': 'Show installed libraries and their size', 'command': 'du -h -d 1 "$(pip -V | cut -d \' \' -f 4 | sed \'s/pip//g\')" | grep -vE "dist-info|_distutils_hack|__pycache__" | sort -h | tee installed.txt || true'}},
            {'run': {'name': 'Show installed libraries and their versions', 'command': 'pip list --format=freeze | tee installed.txt || true'}},
            {'run': {'name': 'Show biggest libraries', 'command': 'dpkg-query --show --showformat=\'${Installed-Size}\t${Package}\n\' | sort -rh | head -25 | sort -h | awk \'{ package=$2; sub(".*/", "", package); printf("%.5f GB %s\n", $1/1024/1024, package)}\' || true'}},
            {'run': {'name': 'Create `test-results` directory', 'command': 'mkdir test-results'}},
            {'run': {'name': 'Get files to test', 'command': f'curl -L -o {self.job_name}_test_list.txt <<pipeline.parameters.{self.job_name}_test_list>> --header "Circle-Token: $CIRCLE_TOKEN"' if self.name != 'pr_documentation_tests' else 'echo "Skipped"'}},
            {'run': {'name': 'Split tests across parallel nodes: show current parallel tests', 'command': f"TESTS=$(circleci tests split --split-by=timings {self.job_name}_test_list.txt) && echo $TESTS > splitted_tests.txt && echo $TESTS | tr ' ' '\n'" if self.parallelism else f"""awk '{{printf "%s ", $0}}' {self.job_name}_test_list.txt > splitted_tests.txt"""}},
            {'run': {'name': 'fetch hub objects before pytest', 'command': 'cp -r /test_data/* . 2>/dev/null || true; python3 utils/fetch_hub_objects_for_ci.py'}},
            {'run': {'name': 'Run tests', 'command': f"({timeout_cmd} python3 -m pytest {marker_cmd} -n {self.pytest_num_workers} {junit_flags} {repeat_on_failure_flags} {' '.join(pytest_flags)} $(cat splitted_tests.txt) | tee tests_output.txt)"}},
            {'run': {'name': 'Check for test crashes', 'when': 'always', 'command': 'if [ ! -f tests_output.txt ]; then\n echo "ERROR: tests_output.txt does not exist - tests may not have run properly"\n exit 1\n elif grep -q "crashed and worker restarting disabled" tests_output.txt; then\n echo "ERROR: Worker crash detected in test output"\n echo "Found: crashed and worker restarting disabled"\n exit 1\n else\n echo "Tests output file exists and no worker crashes detected"\n fi'}},
            {'run': {'name': 'Expand to show skipped tests', 'when': 'always', 'command': 'python3 .circleci/parse_test_outputs.py --file tests_output.txt --skip'}},
            {'run': {'name': 'Failed tests: show reasons', 'when': 'always', 'command': 'python3 .circleci/parse_test_outputs.py --file tests_output.txt --fail'}},
            {'run': {'name': 'Errors', 'when': 'always', 'command': 'python3 .circleci/parse_test_outputs.py --file tests_output.txt --errors'}},
            {'store_test_results': {'path': 'test-results'}},
            {'store_artifacts': {'path': 'test-results/junit.xml'}},
            {'store_artifacts': {'path': 'reports'}},
            {'store_artifacts': {'path': 'tests.txt'}},
            {'store_artifacts': {'path': 'splitted_tests.txt'}},
            {'store_artifacts': {'path': 'installed.txt'}},
        ]
        if self.parallelism:
            job['parallelism'] = parallel
        job['steps'] = steps
        return job

    @property
    def job_name(self):
        return self.name if 'examples' in self.name or 'pipeline' in self.name or 'pr_documentation' in self.name else f'tests_{self.name}'
```

class_skeleton:

```python
@dataclass
class CircleCIJob:
    def __post_init__(self):
        pass

    def to_dict(self):
        pass

    @property
    def job_name(self):
        pass
```

Code metrics:

| total_program_units | total_doc_str | AvgCountLine | AvgCountLineBlank | AvgCountLineCode | AvgCountLineComment | AvgCyclomatic | CommentToCodeRatio | CountClassBase | CountClassCoupled | CountClassCoupledModified | CountClassDerived | CountDeclInstanceMethod | CountDeclInstanceVariable | CountDeclMethod | CountDeclMethodAll | CountLine | CountLineBlank | CountLineCode | CountLineCodeDecl | CountLineCodeExe | CountLineComment | CountStmt | CountStmtDecl | CountStmtExe | MaxCyclomatic | MaxInheritanceTree | MaxNesting | SumCyclomatic |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 6 | 0 | 32 | 1 | 30 | 1 | 7 | 0.05 | 0 | 1 | 0 | 0 | 3 | 0 | 3 | 3 | 113 | 5 | 103 | 28 | 98 | 5 | 57 | 26 | 53 | 10 | 0 | 3 | 20 |
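A minimal sketch of rendering the job dataclass above into a CircleCI job dict. The class depends on module-level constants (`DEFAULT_DOCKER_IMAGE`, `COMMON_ENV_VARIABLES`, `COMMON_PYTEST_OPTIONS`, `FLAKY_TEST_FAILURE_PATTERNS`) that this row does not show, so the shapes below are assumed stand-ins, not the real values:

```python
# Assumed stand-ins for the module-level constants referenced by CircleCIJob.
DEFAULT_DOCKER_IMAGE = [{"image": "huggingface/transformers-quality"}]  # assumed shape
COMMON_ENV_VARIABLES = {"TRANSFORMERS_IS_CI": True}                     # assumed
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0}                       # assumed
FLAKY_TEST_FAILURE_PATTERNS = ["OSError", "Timeout"]                    # assumed

job = CircleCIJob(name="torch", marker="not generate", parallelism=6)
config = job.to_dict()            # dict with docker image, environment, resource_class, steps
print(config["resource_class"])   # 'xlarge' by default
print(job.job_name)               # 'tests_torch': non-example jobs get a 'tests_' prefix
```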
| id | repository_name | file_path | class_name |
|---|---|---|---|
| 7 | `huggingface/pytorch-pretrained-BERT` | `huggingface_pytorch-pretrained-BERT/.circleci/create_circleci_config.py` | `create_circleci_config.EmptyJob` |

human_written_code:

```python
import copy


class EmptyJob:
    job_name = 'empty'

    def to_dict(self):
        steps = [{'run': 'ls -la'}]
        if self.job_name == 'collection_job':
            steps.extend([
                'checkout',
                {'run': 'pip install requests || true'},
                {'run': 'while [[ $(curl --location --request GET "https://circleci.com/api/v2/workflow/$CIRCLE_WORKFLOW_ID/job" --header "Circle-Token: $CCI_TOKEN"| jq -r \'.items[]|select(.name != "collection_job")|.status\' | grep -c "running") -gt 0 ]]; do sleep 5; done || true'},
                {'run': 'python utils/process_circleci_workflow_test_reports.py --workflow_id $CIRCLE_WORKFLOW_ID || true'},
                {'store_artifacts': {'path': 'outputs'}},
                {'run': 'echo "All required jobs have now completed"'},
            ])
        return {'docker': copy.deepcopy(DEFAULT_DOCKER_IMAGE), 'resource_class': 'small', 'steps': steps}
```

class_skeleton:

```python
class EmptyJob:
    def to_dict(self):
        pass
```

Code metrics:

| total_program_units | total_doc_str | AvgCountLine | AvgCountLineBlank | AvgCountLineCode | AvgCountLineComment | AvgCyclomatic | CommentToCodeRatio | CountClassBase | CountClassCoupled | CountClassCoupledModified | CountClassDerived | CountDeclInstanceMethod | CountDeclInstanceVariable | CountDeclMethod | CountDeclMethodAll | CountLine | CountLineBlank | CountLineCode | CountLineCodeDecl | CountLineCodeExe | CountLineComment | CountStmt | CountStmtDecl | CountStmtExe | MaxCyclomatic | MaxInheritanceTree | MaxNesting | SumCyclomatic |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2 | 0 | 19 | 1 | 18 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 22 | 2 | 20 | 4 | 18 | 0 | 7 | 4 | 5 | 2 | 0 | 1 | 2 |
8
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/image_processing_new_imgproc_model.py
|
image_processing_new_imgproc_model.ImgprocModelImageProcessor
|
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
import torch
from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
import numpy as np
from typing import Optional, Union
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
class ImgprocModelImageProcessor(BaseImageProcessor):
"""
Constructs a IMGPROC_MODEL image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 384, 'width': 384}
size = get_size_dict(size, default_to_square=True)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
output_size = (size['height'], size['width'])
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after resizing, as a dictionary in the format `{"height": int, "width": int}`.
The image is resized to exactly `(size["height"], size["width"])`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the range [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
return encoded_outputs
def new_image_processing_method(self, pixel_values: torch.FloatTensor):
return pixel_values / 2
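A minimal usage sketch for the processor above, not part of the original file: it assumes the class is importable in scope, that `torch` is installed for the `"pt"` tensor type, and that `fetch_images` (defined elsewhere in the class hierarchy) passes local arrays through unchanged.

import numpy as np

# Fake channels-last uint8 image with pixel values in [0, 255].
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

processor = ImgprocModelImageProcessor(size={"height": 384, "width": 384})
batch = processor.preprocess(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 384, 384])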
|
class ImgprocModelImageProcessor(BaseImageProcessor):
'''
Constructs an IMGPROC_MODEL image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after resizing, as a dictionary in the format `{"height": int, "width": int}`.
The image is resized to exactly `(size["height"], size["width"])`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the range [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def new_image_processing_method(self, pixel_values: torch.FloatTensor):
pass
| 6 | 3 | 53 | 4 | 30 | 18 | 6 | 0.85 | 1 | 6 | 0 | 0 | 4 | 9 | 4 | 4 | 251 | 23 | 123 | 53 | 82 | 105 | 54 | 17 | 49 | 17 | 1 | 1 | 24 |
9 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/legacy/pytorch-lightning/lightning_base.py | lightning_base.BaseTransformer |
import os
from transformers.optimization import Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup
from typing import Any
import argparse
import pytorch_lightning as pl
from pathlib import Path
from transformers import AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, is_torch_available
class BaseTransformer(pl.LightningModule):
def __init__(self, hparams: argparse.Namespace, num_labels=None, mode='base', config=None, tokenizer=None, model=None, **config_kwargs):
"""Initialize a model, tokenizer and config."""
super().__init__()
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
self.config = AutoConfig.from_pretrained(self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **{'num_labels': num_labels} if num_labels is not None else {}, cache_dir=cache_dir, **config_kwargs)
else:
self.config: PretrainedConfig = config
extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if tokenizer is None:
self.tokenizer = AutoTokenizer.from_pretrained(self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.model_type = MODEL_MODES[mode]
if model is None:
self.model = self.model_type.from_pretrained(self.hparams.model_name_or_path, from_tf=bool('.ckpt' in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir)
else:
self.model = model
def load_hf_checkpoint(self, *args, **kwargs):
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def configure_optimizers(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
model = self.model
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for n, p in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': self.hparams.weight_decay}, {'params': [p for n, p in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
if self.hparams.adafactor:
optimizer = Adafactor(optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
else:
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return ([optimizer], [scheduler])
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
def total_steps(self) -> int:
"""The number of total training steps that will be run. Used for lr scheduler purposes."""
num_devices = max(1, self.hparams.gpus)
effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return self.dataset_size / effective_batch_size * self.hparams.max_epochs
def setup(self, mode):
if mode == 'test':
self.dataset_size = len(self.test_dataloader().dataset)
else:
self.train_loader = self.get_dataloader('train', self.hparams.train_batch_size, shuffle=True)
self.dataset_size = len(self.train_dataloader().dataset)
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False):
raise NotImplementedError('You must implement this for your task')
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader('dev', self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader('test', self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(self.hparams.data_dir, 'cached_{}_{}_{}'.format(mode, list(filter(None, self.hparams.model_name_or_path.split('/'))).pop(), str(self.hparams.max_seq_length)))
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: dict[str, Any]) -> None:
save_path = self.output_dir.joinpath('best_tfmr')
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default=None, type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from huggingface.co')
parser.add_argument('--encoder_layerdrop', type=float, help='Encoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--decoder_layerdrop', type=float, help='Decoder layer dropout probability (Optional). Goes into model.config')
parser.add_argument('--dropout', type=float, help='Dropout probability (Optional). Goes into model.config')
parser.add_argument('--attention_dropout', type=float, help='Attention dropout probability (Optional). Goes into model.config')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--lr_scheduler', default='linear', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='Learning rate scheduler')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--num_workers', default=4, type=int, help='kwarg passed to DataLoader')
parser.add_argument('--num_train_epochs', dest='max_epochs', default=3, type=int)
parser.add_argument('--train_batch_size', default=32, type=int)
parser.add_argument('--eval_batch_size', default=32, type=int)
parser.add_argument('--adafactor', action='store_true')
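A hedged sketch of how this base class is meant to be specialized. The subclass, the random stand-in data, and the `hparams` values below are illustrative only; the `"sequence-classification"` mode string assumes a matching entry in `MODEL_MODES`, and loading `bert-base-cased` needs network access.

import argparse
import torch
from torch.utils.data import DataLoader, TensorDataset


class ToyClassifier(BaseTransformer):
    def __init__(self, hparams):
        # num_labels flows into AutoConfig; mode selects the Auto* model class.
        super().__init__(hparams, num_labels=2, mode="sequence-classification")

    def training_step(self, batch, batch_idx):
        input_ids, attention_mask, labels = batch
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        return outputs.loss

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        # Random stand-in data; a real task would tokenize a corpus here.
        dataset = TensorDataset(
            torch.randint(0, 1000, (64, 16)),      # input_ids
            torch.ones(64, 16, dtype=torch.long),  # attention_mask
            torch.randint(0, 2, (64,)),            # labels
        )
        return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)


hparams = argparse.Namespace(
    model_name_or_path="bert-base-cased", config_name="", tokenizer_name=None,
    cache_dir="", output_dir="out", learning_rate=5e-5, lr_scheduler="linear",
    weight_decay=0.0, adam_epsilon=1e-8, warmup_steps=0,
    train_batch_size=8, eval_batch_size=8, adafactor=False,
)
model = ToyClassifier(hparams)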
|
class BaseTransformer(pl.LightningModule):
def __init__(self, hparams: argparse.Namespace, num_labels=None, mode='base', config=None, tokenizer=None, model=None, **config_kwargs):
'''Initialize a model, tokenizer and config.'''
pass
def load_hf_checkpoint(self, *args, **kwargs):
pass
def get_lr_scheduler(self):
pass
def configure_optimizers(self):
'''Prepare optimizer and schedule (linear warmup and decay)'''
pass
def test_step(self, batch, batch_nb):
pass
def test_epoch_end(self, outputs):
pass
def total_steps(self) -> int:
'''The number of total training steps that will be run. Used for lr scheduler purposes.'''
pass
def setup(self, mode):
pass
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool=False):
pass
def train_dataloader(self):
pass
def val_dataloader(self):
pass
def test_dataloader(self):
pass
def _feature_file(self, mode):
pass
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: dict[str, Any]) -> None:
pass
@staticmethod
def add_model_specific_args(parser, root_dir):
pass
| 18 | 3 | 12 | 0 | 12 | 0 | 2 | 0.04 | 1 | 17 | 0 | 2 | 14 | 9 | 15 | 15 | 204 | 20 | 178 | 50 | 151 | 7 | 86 | 37 | 70 | 10 | 1 | 2 | 26 |
10 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/legacy/pytorch-lightning/lightning_base.py | lightning_base.LoggingCallback |
import pytorch_lightning as pl
import os
from pytorch_lightning.utilities import rank_zero_info
class LoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lr_scheduler = trainer.lr_schedulers[0]['scheduler']
lrs = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(lrs)
def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info('***** Validation results *****')
metrics = trainer.callback_metrics
for key in sorted(metrics):
if key not in ['log', 'progress_bar']:
rank_zero_info(f'{key} = {str(metrics[key])}\n')
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info('***** Test results *****')
metrics = trainer.callback_metrics
output_test_results_file = os.path.join(pl_module.hparams.output_dir, 'test_results.txt')
with open(output_test_results_file, 'w') as writer:
for key in sorted(metrics):
if key not in ['log', 'progress_bar']:
rank_zero_info(f'{key} = {str(metrics[key])}\n')
writer.write(f'{key} = {str(metrics[key])}\n')
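A short sketch of wiring the callback into a trainer, using the legacy pytorch-lightning API this example targets; `model` stands for any BaseTransformer subclass and is not defined here.

import pytorch_lightning as pl

trainer = pl.Trainer(max_epochs=1, callbacks=[LoggingCallback()])
# trainer.fit(model)  # learning rates logged each batch, metrics at val/test end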
|
class LoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
pass
def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
pass
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
pass
| 4 | 0 | 7 | 0 | 6 | 1 | 2 | 0.1 | 1 | 2 | 0 | 0 | 3 | 0 | 3 | 3 | 24 | 2 | 20 | 12 | 16 | 2 | 20 | 11 | 16 | 3 | 1 | 3 | 7 |
11 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_add_function.py | modeling_add_function.TestAttention |
from ...utils.deprecation import deprecate_kwarg
import torch
from torch import nn
from typing import Optional
class TestAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
"""
def __init__(self):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
_ = apply_rotary_pos_emb(1, 1, 1, 1)
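The scaling change called out in the docstring, as a standalone sketch (shapes are arbitrary; only the `sqrt(head_dim / 2)` denominator is the point):

import math

import torch

batch, num_heads, seq_len, head_dim = 2, 4, 8, 64
query_states = torch.randn(batch, num_heads, seq_len, head_dim)
key_states = torch.randn(batch, num_heads, seq_len, head_dim)

# Denominator uses head_dim / 2 because the attention input is a 2x-wide concatenation.
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(head_dim / 2)
print(attn_weights.shape)  # torch.Size([2, 4, 8, 8])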
|
class TestAttention(nn.Module):
'''
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
'''
def __init__(self):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 4 | 1 | 2 | 0 | 2 | 0 | 1 | 2.2 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 12 | 19 | 3 | 5 | 4 | 2 | 11 | 5 | 4 | 2 | 1 | 1 | 0 | 2 |
12 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertAttention |
import torch
from torch import nn
from typing import Optional, Union
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
class DummyBertAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__()
self.self = DUMMY_BERT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type, layer_idx=layer_idx)
self.output = DummyBertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
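Head pruning as driven from the public API, sketched with the real BertModel as a stand-in: the Dummy* classes mirror BERT's structure, and `PreTrainedModel.prune_heads` dispatches to the per-layer `attention.prune_heads` shown above.

from transformers import BertConfig, BertModel

config = BertConfig(hidden_size=64, num_attention_heads=4, num_hidden_layers=2, intermediate_size=128)
model = BertModel(config)
model.prune_heads({0: [0, 2]})  # drop heads 0 and 2 in layer 0; Q/K/V and output proj shrink
print(model.encoder.layer[0].attention.self.num_attention_heads)  # 2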
|
class DummyBertAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
pass
def prune_heads(self, heads):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
| 5 | 0 | 15 | 1 | 14 | 1 | 1 | 0.07 | 1 | 5 | 1 | 0 | 3 | 3 | 3 | 13 | 49 | 4 | 43 | 20 | 30 | 3 | 22 | 11 | 18 | 2 | 1 | 1 | 4 |
13 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertEmbeddings |
import torch
from torch import nn
from typing import Optional, Union
class DummyBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
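A shape sketch, assuming the class above is in scope; BertConfig is used as a stand-in config since it exposes the same fields (vocab_size, hidden_size, max_position_embeddings, type_vocab_size, pad_token_id, layer_norm_eps, hidden_dropout_prob).

import torch
from transformers import BertConfig

config = BertConfig(vocab_size=100, hidden_size=32, max_position_embeddings=64)
embeddings = DummyBertEmbeddings(config)
out = embeddings(input_ids=torch.randint(0, 100, (2, 10)))
print(out.shape)  # torch.Size([2, 10, 32])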
|
class DummyBertEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
pass
| 3 | 1 | 29 | 3 | 23 | 3 | 4 | 0.15 | 1 | 3 | 0 | 0 | 2 | 6 | 2 | 12 | 62 | 8 | 47 | 23 | 37 | 7 | 34 | 16 | 31 | 7 | 1 | 2 | 8 |
14 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertEncoder |
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions
import torch
class DummyBertEncoder(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.config = config
self.layer = nn.ModuleList([DummyBertLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and self.config.is_decoder and (past_key_values is None):
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and self.config.is_decoder and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
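A hedged sketch of the encoder's two return modes. It assumes the Dummy* layer classes above are in scope and that the config's default attention implementation resolves to a key of `DUMMY_BERT_SELF_ATTENTION_CLASSES`; BertConfig again stands in for the example's own config.

import torch
from transformers import BertConfig

config = BertConfig(hidden_size=32, num_attention_heads=4, num_hidden_layers=2, intermediate_size=64)
encoder = DummyBertEncoder(config)
hidden = torch.randn(2, 10, config.hidden_size)

as_dataclass = encoder(hidden)                   # BaseModelOutputWithPastAndCrossAttentions
as_tuple = encoder(hidden, return_dict=False)    # plain tuple, None fields dropped
print(as_dataclass.last_hidden_state.shape, as_tuple[0].shape)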
|
class DummyBertEncoder(nn.Module):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
pass
| 3 | 0 | 45 | 4 | 41 | 0 | 9 | 0 | 1 | 7 | 1 | 0 | 2 | 3 | 2 | 12 | 91 | 8 | 83 | 26 | 68 | 0 | 35 | 14 | 32 | 17 | 1 | 3 | 18 |
15 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertIntermediate |
import torch
from torch import nn
from ...activations import ACT2FN
class DummyBertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class DummyBertIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3 |
16 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertLayer |
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
from ...modeling_layers import GradientCheckpointingLayer
from typing import Optional, Union
from ...utils.deprecation import deprecate_kwarg
class DummyBertLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = DummyBertAttention(config, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = DummyBertAttention(config, position_embedding_type='absolute', layer_idx=layer_idx)
self.intermediate = DummyBertIntermediate(config)
self.output = DummyBertOutput(config)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, past_key_values=past_key_values, cache_position=cache_position)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
cross_attention_outputs = self.crossattention(attention_output, attention_mask=encoder_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
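Feed-forward chunking, the trick `apply_chunking_to_forward` implements, reduced to its essence: because the MLP acts position-wise, the sequence axis can be split and processed in slices with identical results but lower peak memory. A self-contained sketch:

import torch

def feed_forward(x):
    # Stand-in for intermediate + output; any position-wise map behaves the same.
    return torch.relu(x) * 2.0

x = torch.randn(2, 8, 4)                                 # (batch, seq, hidden)
chunked = torch.cat([feed_forward(c) for c in x.chunk(4, dim=1)], dim=1)
assert torch.allclose(chunked, feed_forward(x))          # same result, smaller working set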
|
class DummyBertLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 5 | 0 | 27 | 2 | 23 | 2 | 4 | 0.1 | 1 | 7 | 3 | 0 | 3 | 8 | 3 | 13 | 84 | 9 | 70 | 32 | 57 | 7 | 41 | 23 | 37 | 7 | 1 | 2 | 11 |
17 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertModel |
from ...utils import auto_docstring, logging
import torch
from typing import Optional, Union
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
@auto_docstring(custom_intro='\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in [Attention is\n all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set\n to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and\n `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.\n ')
class DummyBertModel(DummyBertPreTrainedModel):
_no_split_modules = ['DummyBertEmbeddings', 'DummyBertLayer']
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = DummyBertEmbeddings(config)
self.encoder = DummyBertEncoder(config)
self.pooler = DummyBertPooler(config) if add_pooling_layer else None
self.attn_implementation = config._attn_implementation
self.position_embedding_type = config.position_embedding_type
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. `heads_to_prune` is a dict of {layer_num: list of heads to prune in this layer}.
See base class `PreTrainedModel`.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[list[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[-2] if not isinstance(past_key_values, Cache) else past_key_values.get_seq_length()
if token_type_ids is None:
if hasattr(self.embeddings, 'token_type_ids'):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
use_sdpa_attention_masks = self.attn_implementation == 'sdpa' and self.position_embedding_type == 'absolute' and (head_mask is None) and (not output_attentions)
if use_sdpa_attention_masks and attention_mask.dim() == 2:
if self.config.is_decoder:
extended_attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, embedding_output, past_key_values_length)
else:
extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, embedding_output.dtype, tgt_len=seq_length)
else:
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
if use_sdpa_attention_masks and encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, embedding_output.dtype, tgt_len=seq_length)
else:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
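End-to-end forward sketch for the model above. It assumes the Dummy* classes defined in this file are in scope; BertConfig stands in for DummyBertConfig, which shares its fields.

import torch
from transformers import BertConfig

config = BertConfig(vocab_size=100, hidden_size=32, num_attention_heads=4,
                    num_hidden_layers=2, intermediate_size=64)
model = DummyBertModel(config)
outputs = model(input_ids=torch.randint(0, 100, (2, 10)))
print(outputs.last_hidden_state.shape)  # torch.Size([2, 10, 32])
print(outputs.pooler_output.shape)      # torch.Size([2, 32])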
| null | 8 | 2 | 37 | 4 | 25 | 8 | 5 | 0.35 | 1 | 7 | 3 | 0 | 5 | 6 | 5 | 6 | 211 | 29 | 135 | 45 | 108 | 47 | 65 | 29 | 59 | 21 | 2 | 2 | 27 |
18 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertOutput |
import torch
from torch import nn
class DummyBertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class DummyBertOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
19 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertPooler |
from torch import nn
import torch
class DummyBertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
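For intuition, the pooler is just a tanh projection of the first ([CLS]) token; a minimal functional equivalent:

import torch

hidden_states = torch.randn(2, 10, 32)   # (batch, seq, hidden)
dense = torch.nn.Linear(32, 32)
pooled = torch.tanh(dense(hidden_states[:, 0]))
print(pooled.shape)                       # torch.Size([2, 32])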
|
class DummyBertPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 5 | 1 | 1 | 0.2 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 10 | 7 | 7 | 2 | 10 | 7 | 7 | 1 | 1 | 0 | 2 |
20 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertPreTrainedModel |
from ...modeling_utils import PreTrainedModel
from .configuration_dummy_bert import DummyBertConfig
from ...utils import auto_docstring, logging
from torch import nn
@auto_docstring
class DummyBertPreTrainedModel(PreTrainedModel):
config: DummyBertConfig
base_model_prefix = 'dummy_bert'
supports_gradient_checkpointing = True
_supports_sdpa = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, DummyBertLMPredictionHead):
module.bias.data.zero_()
|
@auto_docstring
class DummyBertPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 15 | 0 | 12 | 3 | 6 | 0.39 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 27 | 2 | 18 | 7 | 16 | 7 | 16 | 7 | 14 | 6 | 1 | 2 | 6 |
21 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py | modeling_dummy_bert.DummyBertSdpaSelfAttention |
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
import torch
class DummyBertSdpaSelfAttention(DummyBertSelfAttention):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__(config, position_embedding_type=position_embedding_type, layer_idx=layer_idx)
self.dropout_prob = config.attention_probs_dropout_prob
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
if self.position_embedding_type != 'absolute' or output_attentions or head_mask is not None:
logger.warning_once('DummyBertSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
return super().forward(hidden_states, attention_mask, head_mask, encoder_hidden_states, past_key_values, output_attentions, cache_position)
bsz, tgt_len, _ = hidden_states.size()
query_layer = self.query(hidden_states).view(bsz, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
is_updated = False
is_cross_attention = encoder_hidden_states is not None
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_layer = curr_past_key_value.layers[self.layer_idx].keys
value_layer = curr_past_key_value.layers[self.layer_idx].values
else:
key_layer = self.key(current_states).view(bsz, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(current_states).view(bsz, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_layer, value_layer = curr_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
is_causal = self.is_decoder and (not is_cross_attention) and (attention_mask is None) and (tgt_len > 1)
attn_output = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=self.dropout_prob if self.training else 0.0, is_causal=is_causal)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, self.all_head_size)
return (attn_output, None)
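The fused kernel this class dispatches to, isolated (shapes illustrative; `is_causal=False` matches the encoder, non-cross-attention, no-mask case above):

import torch
import torch.nn.functional as F

bsz, num_heads, tgt_len, head_dim = 2, 4, 8, 16
query = torch.randn(bsz, num_heads, tgt_len, head_dim)
key = torch.randn(bsz, num_heads, tgt_len, head_dim)
value = torch.randn(bsz, num_heads, tgt_len, head_dim)

attn_output = F.scaled_dot_product_attention(query, key, value, attn_mask=None,
                                             dropout_p=0.0, is_causal=False)
print(attn_output.shape)  # torch.Size([2, 4, 8, 16])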
|
class DummyBertSdpaSelfAttention(DummyBertSelfAttention):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 48
| 6
| 34
| 9
| 6
| 0.28
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 15
| 99
| 12
| 68
| 22
| 56
| 19
| 35
| 13
| 32
| 11
| 2
| 2
| 12
|
22
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py
|
modeling_dummy_bert.DummyBertSelfAttention
|
import math
import torch
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from torch import nn
from typing import Optional, Union
class DummyBertSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.layer_idx = layer_idx
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states)
query_layer = query_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
is_updated = False
is_cross_attention = encoder_hidden_states is not None
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_layer = curr_past_key_value.layers[self.layer_idx].keys
value_layer = curr_past_key_value.layers[self.layer_idx].values
else:
key_layer = self.key(current_states)
key_layer = key_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(current_states)
value_layer = value_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_layer, value_layer = curr_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
query_length, key_length = (query_layer.shape[2], key_layer.shape[2])
if past_key_values is not None:
position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1)
else:
position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
return (context_layer, attention_probs)
|
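In the `relative_key` / `relative_key_query` branch above, the score bias comes from an embedding of pairwise token distances shifted into the index range [0, 2 * max_position_embeddings - 2]. A standalone sketch of just that distance/lookup step, with toy sizes assumed for illustration:

```python
import torch
from torch import nn

max_position_embeddings, head_size = 16, 8
distance_embedding = nn.Embedding(2 * max_position_embeddings - 1, head_size)

query_length = key_length = 5
position_ids_l = torch.arange(query_length).view(-1, 1)  # (q_len, 1)
position_ids_r = torch.arange(key_length).view(1, -1)    # (1, k_len)
distance = position_ids_l - position_ids_r               # values in [-(k_len-1), q_len-1]

# Shift distances to valid embedding indices, then look up per-pair vectors.
positional_embedding = distance_embedding(distance + max_position_embeddings - 1)
print(positional_embedding.shape)  # torch.Size([5, 5, 8])

# The bias added to the attention scores is an einsum against the queries:
query_layer = torch.randn(2, 4, query_length, head_size)  # (b, h, l, d)
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
print(relative_position_scores.shape)  # torch.Size([2, 4, 5, 5])
```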
class DummyBertSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 43
| 7
| 31
| 6
| 6
| 0.19
| 1
| 5
| 0
| 1
| 3
| 11
| 3
| 13
| 132
| 22
| 93
| 44
| 80
| 18
| 72
| 35
| 68
| 13
| 1
| 2
| 17
|
23
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_dummy_bert.py
|
modeling_dummy_bert.DummyBertSelfOutput
|
import torch
from torch import nn
class DummyBertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
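DummyBertSelfOutput is the classic BERT-style post-LayerNorm residual sublayer: project, drop, then normalize the sum with the input. A tiny usage sketch with a hypothetical stand-in config:

```python
import torch
from torch import nn
from types import SimpleNamespace

# Stand-in config; field names mirror the ones the module reads.
config = SimpleNamespace(hidden_size=32, layer_norm_eps=1e-12, hidden_dropout_prob=0.1)

dense = nn.Linear(config.hidden_size, config.hidden_size)
layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
dropout = nn.Dropout(config.hidden_dropout_prob)

input_tensor = torch.randn(2, 5, config.hidden_size)   # residual branch
hidden_states = torch.randn(2, 5, config.hidden_size)  # e.g. attention output

# Post-norm: LayerNorm is applied *after* the residual addition.
out = layer_norm(dropout(dense(hidden_states)) + input_tensor)
print(out.shape)  # torch.Size([2, 5, 32])
```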
class DummyBertSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
24
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_from_uppercase_model.py
|
modeling_from_uppercase_model.FromUppercaseModelAttention
|
import torch
from .configuration_from_uppercase_model import FromUppercaseModelTextConfig, FromUppercaseModelVisionConfig
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from typing import Callable, Optional, Union
class FromUppercaseModelAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Union[FromUppercaseModelVisionConfig, FromUppercaseModelTextConfig]):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
if self.config._attn_implementation == 'flash_attention_2':
self.is_causal = causal_attention_mask is not None
elif attention_mask is not None and causal_attention_mask is not None:
attention_mask = attention_mask + causal_attention_mask
elif causal_attention_mask is not None:
attention_mask = causal_attention_mask
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout, output_attentions=output_attentions)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
|
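FromUppercaseModelAttention dispatches to `eager_attention_forward`, which is referenced but not part of this snippet. The following is a plausible minimal sketch inferred from the call site (module, q/k/v, additive mask, `scaling` and `dropout` keywords, returning output plus weights); it is an assumption modeled on the common pattern, not necessarily the exact library implementation:

```python
import torch
from torch import nn

def eager_attention_forward(module, query, key, value, attention_mask,
                            scaling, dropout=0.0, **kwargs):
    # Assumed interface: query/key/value are (batch, heads, seq, head_dim).
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask  # additive mask, large negatives block
    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value)
    # The call site reshapes to (batch, seq, embed_dim), so return (batch, seq, heads, head_dim).
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights
```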
class FromUppercaseModelAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: Union[FromUppercaseModelVisionConfig, FromUppercaseModelTextConfig]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3
| 2
| 32
| 5
| 25
| 2
| 4
| 0.11
| 1
| 5
| 0
| 2
| 3
| 10
| 3
| 13
| 102
| 19
| 75
| 30
| 65
| 8
| 54
| 24
| 50
| 8
| 1
| 2
| 11
|
25
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_from_uppercase_model.py
|
modeling_from_uppercase_model.FromUppercaseModelEncoderLayer
|
from ...modeling_layers import GradientCheckpointingLayer
from .configuration_from_uppercase_model import FromUppercaseModelTextConfig, FromUppercaseModelVisionConfig
from torch import nn
from typing import Callable, Optional, Union
import torch
class FromUppercaseModelEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Union[FromUppercaseModelVisionConfig, FromUppercaseModelTextConfig]):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = FromUppercaseModelAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = FromUppercaseModelMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
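Unlike the BERT-style sublayer earlier, where the norm comes after the residual add, this CLIP-style layer is pre-norm: each branch normalizes its input first and the residual stream is only ever added to. A compact sketch of the ordering, with toy linear modules standing in for the attention and MLP blocks:

```python
import torch
from torch import nn

dim = 32
layer_norm1, layer_norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn_stub = nn.Linear(dim, dim)  # stand-in for self-attention
mlp_stub = nn.Linear(dim, dim)   # stand-in for the MLP block

x = torch.randn(2, 5, dim)

# Pre-norm: normalize, transform, then add back to the untouched residual.
x = x + attn_stub(layer_norm1(x))
x = x + mlp_stub(layer_norm2(x))
print(x.shape)  # torch.Size([2, 5, 32])
```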
class FromUppercaseModelEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Union[FromUppercaseModelVisionConfig, FromUppercaseModelTextConfig]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 23
| 3
| 16
| 5
| 2
| 0.31
| 1
| 4
| 1
| 0
| 2
| 5
| 2
| 12
| 48
| 6
| 32
| 17
| 23
| 10
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
26
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_from_uppercase_model.py
|
modeling_from_uppercase_model.FromUppercaseModelMLP
|
import torch
from torch import nn
from ...activations import ACT2FN
class FromUppercaseModelMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class FromUppercaseModelMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 4
| 2
| 12
| 13
| 1
| 12
| 7
| 9
| 0
| 12
| 7
| 9
| 1
| 1
| 0
| 2
|
27
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_multimodal2.py
|
modeling_multimodal2.Multimodal2VisionAttention
|
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
from typing import Callable, Optional, Union
from .configuration_multimodal2 import Multimodal2Config, Multimodal2TextConfig, Multimodal2VisionConfig
class Multimodal2VisionAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Union[Multimodal2VisionConfig, Multimodal2TextConfig]):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
if self.config._attn_implementation == 'flash_attention_2':
self.is_causal = causal_attention_mask is not None
elif attention_mask is not None and causal_attention_mask is not None:
attention_mask = attention_mask + causal_attention_mask
elif causal_attention_mask is not None:
attention_mask = causal_attention_mask
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout, output_attentions=output_attentions)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
|
class Multimodal2VisionAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: Union[Multimodal2VisionConfig, Multimodal2TextConfig]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3
| 2
| 32
| 5
| 25
| 2
| 4
| 0.11
| 1
| 5
| 0
| 2
| 3
| 10
| 3
| 13
| 102
| 19
| 75
| 30
| 65
| 8
| 54
| 24
| 50
| 8
| 1
| 2
| 11
|
28
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_multimodal2.py
|
modeling_multimodal2.Multimodal2VisionEmbeddings
|
from torch import nn
import torch
from .configuration_multimodal2 import Multimodal2Config, Multimodal2TextConfig, Multimodal2VisionConfig
from ...utils import auto_docstring, can_return_tuple, torch_int
class Multimodal2VisionEmbeddings(nn.Module):
def __init__(self, config: Multimodal2VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size}).")
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
|
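The interpolation above is what lets a model pretrained on one patch grid run at another resolution: split off the CLS position, reshape the patch positions to a 2D grid, bicubic-resize, and flatten back. A standalone sketch with made-up sizes:

```python
import torch
from torch import nn

embed_dim, old_grid, new_grid = 16, 7, 10                 # e.g. 7x7 pretraining grid -> 10x10
pos = torch.randn(1, old_grid * old_grid + 1, embed_dim)  # +1 for the CLS position

class_pos, patch_pos = pos[:, :1], pos[:, 1:]
patch_pos = patch_pos.reshape(1, old_grid, old_grid, embed_dim).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(patch_pos, size=(new_grid, new_grid),
                                      mode='bicubic', align_corners=False)
patch_pos = patch_pos.permute(0, 2, 3, 1).view(1, -1, embed_dim)

new_pos = torch.cat((class_pos, patch_pos), dim=1)
print(new_pos.shape)  # torch.Size([1, 101, 16]) -- 10*10 patches + CLS
```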
class Multimodal2VisionEmbeddings(nn.Module):
def __init__(self, config: Multimodal2VisionConfig):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
pass
| 4
| 1
| 26
| 5
| 19
| 3
| 2
| 0.16
| 1
| 4
| 0
| 0
| 3
| 9
| 3
| 13
| 81
| 16
| 57
| 27
| 53
| 9
| 43
| 27
| 39
| 3
| 1
| 1
| 6
|
29
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_multimodal2.py
|
modeling_multimodal2.Multimodal2VisionEncoder
|
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from torch import nn
import torch
class Multimodal2VisionEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Multimodal2VisionEncoderLayer`].
Args:
config: Multimodal2VisionConfig
"""
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList([Multimodal2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
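The layer loop above follows the usual accumulation contract: with output_hidden_states=True, the returned tuple holds the state entering each layer plus the final output, i.e. num_layers + 1 entries. A miniature sketch of the pattern, with linear layers standing in for encoder layers:

```python
import torch
from torch import nn

layers = nn.ModuleList(nn.Linear(8, 8) for _ in range(3))  # stand-ins for encoder layers
hidden_states = torch.randn(2, 5, 8)

encoder_states = ()
for layer in layers:
    encoder_states = encoder_states + (hidden_states,)  # state *entering* the layer
    hidden_states = layer(hidden_states)
encoder_states = encoder_states + (hidden_states,)      # final output appended last

print(len(encoder_states))  # 4 == num_layers + 1
```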
class Multimodal2VisionEncoder(nn.Module):
'''
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Multimodal2VisionEncoderLayer`].
Args:
config: Multimodal2VisionConfig
'''
def __init__(self, config):
pass
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
'''
pass
| 3
| 2
| 43
| 5
| 25
| 13
| 7
| 0.61
| 1
| 7
| 1
| 0
| 2
| 3
| 2
| 12
| 95
| 13
| 51
| 19
| 40
| 31
| 27
| 11
| 24
| 12
| 1
| 2
| 13
|
30
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_multimodal2.py
|
modeling_multimodal2.Multimodal2VisionEncoderLayer
|
import torch
from ...modeling_layers import GradientCheckpointingLayer
from typing import Callable, Optional, Union
from torch import nn
class Multimodal2VisionEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = Multimodal2Attention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = Multimodal2VisionMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
class Multimodal2VisionEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 23
| 3
| 16
| 5
| 2
| 0.31
| 1
| 4
| 1
| 0
| 2
| 5
| 2
| 12
| 48
| 6
| 32
| 17
| 23
| 10
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
31
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_multimodal2.py
|
modeling_multimodal2.Multimodal2VisionMLP
|
from torch import nn
from ...activations import ACT2FN
import torch
class Multimodal2VisionMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class Multimodal2VisionMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 4
| 2
| 12
| 13
| 1
| 12
| 7
| 9
| 0
| 12
| 7
| 9
| 1
| 1
| 0
| 2
|
32
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_multimodal2.py
|
modeling_multimodal2.Multimodal2VisionModel
|
from typing import Callable, Optional, Union
from torch import nn
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from .configuration_multimodal2 import Multimodal2Config, Multimodal2TextConfig, Multimodal2VisionConfig
from ...utils import auto_docstring, can_return_tuple, torch_int
from transformers.utils import add_start_docstrings
@add_start_docstrings('New doc', MULTIMODAL2_VISION_START_DOCSTRING)
class Multimodal2VisionModel(Multimodal2VisionPreTrainedModel):
config: Multimodal2VisionConfig
main_input_name = 'pixel_values'
_no_split_modules = ['Multimodal2VisionEncoderLayer']
def __init__(self, config: Multimodal2VisionConfig):
super().__init__(config)
self.vision_model = Multimodal2VisionTransformer(config)
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> BaseModelOutputWithPooling:
"""
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Multimodal2VisionModel
>>> model = Multimodal2VisionModel.from_pretrained("openai/multimodal2-vit-base-patch32")
>>> processor = AutoProcessor.from_pretrained("openai/multimodal2-vit-base-patch32")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```"""
return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding)
|
@add_start_docstrings('New doc', MULTIMODAL2_VISION_START_DOCSTRING)
class Multimodal2VisionModel(Multimodal2VisionPreTrainedModel):
def __init__(self, config: Multimodal2VisionConfig):
pass
def get_input_embeddings(self) -> nn.Module:
pass
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> BaseModelOutputWithPooling:
'''
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Multimodal2VisionModel
>>> model = Multimodal2VisionModel.from_pretrained("openai/multimodal2-vit-base-patch32")
>>> processor = AutoProcessor.from_pretrained("openai/multimodal2-vit-base-patch32")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```'''
pass
| 7
| 1
| 15
| 2
| 7
| 6
| 1
| 0.61
| 1
| 3
| 1
| 0
| 3
| 1
| 3
| 4
| 55
| 10
| 28
| 16
| 15
| 17
| 13
| 8
| 9
| 2
| 2
| 0
| 4
|
33
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_multimodal2.py
|
modeling_multimodal2.Multimodal2VisionPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_multimodal2 import Multimodal2Config, Multimodal2TextConfig, Multimodal2VisionConfig
from ...utils import auto_docstring, can_return_tuple, torch_int
@auto_docstring
class Multimodal2VisionPreTrainedModel(PreTrainedModel):
config: Multimodal2Config
base_model_prefix = 'multimodal2_vision'
supports_gradient_checkpointing = True
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, Multimodal2VisionMLP):
pass
|
@auto_docstring
class Multimodal2VisionPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 4
| 0
| 3
| 1
| 2
| 0.56
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 16
| 2
| 9
| 7
| 7
| 5
| 9
| 7
| 7
| 2
| 1
| 1
| 2
|
34
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_multimodal2.py
|
modeling_multimodal2.Multimodal2VisionTransformer
|
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Callable, Optional, Union
from ...utils import auto_docstring, can_return_tuple, torch_int
from torch import nn
class Multimodal2VisionTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = Multimodal2VisionEmbeddings(config)
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.encoder = Multimodal2VisionEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False) -> BaseModelOutputWithPooling:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs: BaseModelOutput = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states)
last_hidden_state = encoder_outputs.last_hidden_state
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
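The pooled output above is simply the first (CLS) position of the final hidden states, passed through post_layernorm; there is no learned pooler head. In miniature, with toy shapes:

```python
import torch
from torch import nn

last_hidden_state = torch.randn(2, 50, 32)  # (batch, 1 + num_patches, dim)
post_layernorm = nn.LayerNorm(32)

pooled_output = post_layernorm(last_hidden_state[:, 0, :])  # take the CLS position
print(pooled_output.shape)  # torch.Size([2, 32])
```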
class Multimodal2VisionTransformer(nn.Module):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False) -> BaseModelOutputWithPooling:
pass
| 4
| 0
| 27
| 4
| 21
| 2
| 4
| 0.07
| 1
| 5
| 2
| 0
| 2
| 5
| 2
| 12
| 57
| 9
| 45
| 21
| 33
| 3
| 24
| 13
| 21
| 6
| 1
| 1
| 7
|
35
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_my_new_model2.py
|
modeling_my_new_model2.MyNewModel2Attention
|
from .configuration_my_new_model2 import MyNewModel2Config
from ...processing_utils import Unpack
from typing import Callable, Optional
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
from ...utils import TransformersKwargs, auto_docstring
import torch
class MyNewModel2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: MyNewModel2Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
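The forward above calls `apply_rotary_pos_emb`, which is not included in this snippet. A sketch of the standard half-rotation (Llama-style) formulation such helpers conventionally implement, with cos/sin broadcast over the heads axis; treat the exact signature as an assumption:

```python
import torch

def rotate_half(x):
    # Swap and negate the two halves of the last dimension.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    # cos/sin: (batch, seq, head_dim); unsqueeze to broadcast over the heads axis.
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
```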
class MyNewModel2Attention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: MyNewModel2Config, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4
| 1
| 35
| 4
| 31
| 1
| 3
| 0.03
| 1
| 4
| 1
| 0
| 2
| 11
| 2
| 12
| 74
| 9
| 63
| 31
| 52
| 2
| 34
| 23
| 31
| 5
| 1
| 2
| 6
|
36
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_my_new_model2.py
|
modeling_my_new_model2.MyNewModel2DecoderLayer
|
from ...modeling_layers import GenericForSequenceClassification, GradientCheckpointingLayer
from ...cache_utils import Cache
from .configuration_my_new_model2 import MyNewModel2Config
from ...utils.deprecation import deprecate_kwarg
from ...utils import TransformersKwargs, auto_docstring
import torch
from typing import Callable, Optional
from ...processing_utils import Unpack
class MyNewModel2DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MyNewModel2Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MyNewModel2Attention(config=config, layer_idx=layer_idx)
self.mlp = MyNewModel2MLP(config)
self.input_layernorm = MyNewModel2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = MyNewModel2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class MyNewModel2DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MyNewModel2Config, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4
| 0
| 25
| 4
| 21
| 2
| 2
| 0.07
| 1
| 8
| 4
| 0
| 2
| 5
| 2
| 12
| 52
| 8
| 42
| 22
| 28
| 3
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
37
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_my_new_model2.py
|
modeling_my_new_model2.MyNewModel2ForSequenceClassification
|
from ...modeling_layers import GenericForSequenceClassification, GradientCheckpointingLayer
class MyNewModel2ForSequenceClassification(GenericForSequenceClassification, MyNewModel2PreTrainedModel):
pass
|
class MyNewModel2ForSequenceClassification(GenericForSequenceClassification, MyNewModel2PreTrainedModel):
pass
| 1
| 0
| 21
| 2
| 17
| 2
| 3
| 0.12
| 1
| 5
| 1
| 0
| 4
| 3
| 4
| 5
| 87
| 11
| 68
| 29
| 50
| 8
| 36
| 16
| 31
| 9
| 2
| 2
| 12
|
38
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_my_new_model2.py
|
modeling_my_new_model2.MyNewModel2MLP
|
from torch import nn
from ...activations import ACT2FN
class MyNewModel2MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
|
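MyNewModel2MLP is a gated (SwiGLU-style) MLP: the up projection is modulated elementwise by an activated gate before being projected back down. A toy-sized sketch, with nn.SiLU assumed as a typical value of config.hidden_act:

```python
import torch
from torch import nn

hidden, intermediate = 8, 32
gate_proj = nn.Linear(hidden, intermediate, bias=False)
up_proj = nn.Linear(hidden, intermediate, bias=False)
down_proj = nn.Linear(intermediate, hidden, bias=False)
act_fn = nn.SiLU()  # assumed activation; the module looks it up via ACT2FN

x = torch.randn(2, 5, hidden)
out = down_proj(act_fn(gate_proj(x)) * up_proj(x))
print(out.shape)  # torch.Size([2, 5, 8])
```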
class MyNewModel2MLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 7
| 2
| 12
| 14
| 1
| 13
| 11
| 10
| 0
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
39
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_my_new_model2.py
|
modeling_my_new_model2.MyNewModel2PreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_my_new_model2 import MyNewModel2Config
from ...utils import TransformersKwargs, auto_docstring
@auto_docstring
class MyNewModel2PreTrainedModel(PreTrainedModel):
config: MyNewModel2Config
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['MyNewModel2DecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': MyNewModel2DecoderLayer, 'attentions': MyNewModel2Attention}
|
@auto_docstring
class MyNewModel2PreTrainedModel(PreTrainedModel):
pass
| 2
| 0
| 10
| 0
| 10
| 0
| 5
| 0
| 1
| 0
| 0
| 2
| 1
| 0
| 1
| 1
| 23
| 1
| 22
| 14
| 20
| 0
| 21
| 14
| 19
| 5
| 1
| 2
| 5
|
40
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_my_new_model2.py
|
modeling_my_new_model2.MyNewModel2RMSNorm
|
import torch
from torch import nn
class MyNewModel2RMSNorm(nn.Module):
def __init__(self, dim: int, eps: float=1e-06):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.zeros(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float())
output = output * (1.0 + self.weight.float())
return output.type_as(x)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.eps}'
|
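MyNewModel2RMSNorm rescales by the root-mean-square of the features, with no mean subtraction and no bias, and stores its weight as a zero-initialized offset around 1.0. A quick numeric check of the normalization invariant:

```python
import torch

x = torch.randn(2, 5, 8)
eps = 1e-6

normed = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
# After normalization the per-position RMS is ~1.
rms = normed.pow(2).mean(-1).sqrt()
print(torch.allclose(rms, torch.ones_like(rms), atol=1e-3))  # True

# With the weight initialized to zeros, the (1.0 + weight) scale starts as the identity.
```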
class MyNewModel2RMSNorm(nn.Module):
def __init__(self, dim: int, eps: float=1e-06):
pass
def _norm(self, x):
pass
def forward(self, x):
pass
def extra_repr(self):
pass
| 5
| 0
| 4
| 0
| 3
| 1
| 1
| 0.15
| 1
| 4
| 0
| 0
| 4
| 2
| 4
| 14
| 18
| 3
| 13
| 8
| 8
| 2
| 13
| 8
| 8
| 1
| 1
| 0
| 4
|
41
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_new_task_model.py
|
modeling_new_task_model.NewTaskModelCausalLMOutputWithPast
|
import torch
from dataclasses import dataclass
from ...cache_utils import Cache, StaticCache
from typing import ClassVar, Optional, Union
from ...utils import ModelOutput, auto_docstring, can_return_tuple
@dataclass
@auto_docstring(custom_intro='\n Base class for NewTaskModel causal language model (or autoregressive) outputs.\n ')
class NewTaskModelCausalLMOutputWithPast(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder after projecting last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
|
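ModelOutput subclasses such as this one behave both as dataclasses and as tuples of their non-None fields, which is what lets downstream code do things like `(embeddings,) + vlm_outputs`. A small behavioral sketch using a hypothetical two-field output:

```python
import torch
from dataclasses import dataclass
from typing import Optional
from transformers.utils import ModelOutput

@dataclass
class TinyOutput(ModelOutput):  # hypothetical, for demonstration only
    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None

out = TinyOutput(logits=torch.ones(2, 3))
print(out.logits.shape)      # attribute access: torch.Size([2, 3])
print(out["logits"].shape)   # dict-style access works too
print(len(out.to_tuple()))   # 1 -- fields left as None are dropped from the tuple view
```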
@dataclass
@auto_docstring(custom_intro='\n Base class for NewTaskModel causal language model (or autoregressive) outputs.\n ')
class NewTaskModelCausalLMOutputWithPast(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder after projecting last hidden state.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.57
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 5
| 7
| 7
| 6
| 25
| 7
| 7
| 6
| 0
| 1
| 0
| 0
|
42
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_new_task_model.py
|
modeling_new_task_model.NewTaskModelForNewTask
|
from ...utils import ModelOutput, auto_docstring, can_return_tuple
from ...cache_utils import Cache, StaticCache
from ...generation import GenerationMixin
from torch import nn
from typing import ClassVar, Optional, Union
import torch
@auto_docstring(custom_intro='\n The Base NewTaskModel model which consists of a vision backbone and a language model without language modeling head.,\n ')
class NewTaskModelForNewTask(NewTaskModelPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {'^language_model.model': 'model.language_model', '^vision_tower': 'model.vision_tower', '^multi_modal_projector': 'model.multi_modal_projector', '^language_model.lm_head': 'lm_head'}
_tied_weights_keys = ['lm_head.weight']
main_input_name: ClassVar[str] = 'doc_input_ids'
def __init__(self, config):
super().__init__(config)
self.model = NewTaskModelModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.embedding_dim = self.config.embedding_dim
self.custom_text_proj = nn.Linear(self.config.text_config.hidden_size, self.embedding_dim)
if self.language_model._tied_weights_keys is not None:
self._tied_weights_keys = [f'model.language_model.{k}' for k in self.language_model._tied_weights_keys]
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def set_decoder(self, decoder):
self.model.set_decoder(decoder)
def get_decoder(self):
return self.model.get_decoder()
def get_image_features(self, pixel_values):
return self.model.get_image_features(pixel_values)
@property
def language_model(self):
return self.model.language_model
@property
def vision_tower(self):
return self.model.vision_tower
@property
def multi_modal_projector(self):
return self.model.multi_modal_projector
@can_return_tuple
@auto_docstring
def forward(self, input_ids: torch.LongTensor=None, pixel_values: torch.FloatTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]]=None, token_type_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, num_logits_to_keep: int=0) -> Union[tuple, NewTaskModelCausalLMOutputWithPast]:
"""
Returns:
"""
vlm_outputs = super().forward(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, token_type_ids=token_type_ids, cache_position=cache_position, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=True, return_dict=True, num_logits_to_keep=num_logits_to_keep)
last_hidden_states = vlm_outputs.hidden_states[-1]
proj = self.custom_text_proj(last_hidden_states)
embeddings = proj / proj.norm(dim=-1, keepdim=True)
if attention_mask is not None:
embeddings = embeddings * attention_mask.unsqueeze(-1)
return (embeddings,) + vlm_outputs
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, cache_position=None, position_ids=None, pixel_values=None, attention_mask=None, token_type_ids=None, use_cache=True, logits_to_keep=None, labels=None, **kwargs):
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, cache_position=cache_position, use_cache=use_cache, logits_to_keep=logits_to_keep, token_type_ids=token_type_ids, **kwargs)
if model_inputs.get('position_ids') is not None:
model_inputs['position_ids'] += 1
if cache_position[0] == 0:
model_inputs['pixel_values'] = pixel_values
is_training = token_type_ids is not None and labels is not None
is_static_hybrid_cache = isinstance(past_key_values, StaticCache) and any(past_key_values.is_sliding)
if cache_position[0] == 0 and is_static_hybrid_cache:
input_tensor = inputs_embeds if inputs_embeds is not None else input_ids
causal_mask = self.model._update_causal_mask(attention_mask, token_type_ids, past_key_values, cache_position, input_tensor, is_training)
model_inputs['attention_mask'] = causal_mask
return model_inputs
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of=None, mean_resizing=True) -> nn.Embedding:
model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self.config.text_config.vocab_size = model_embeds.num_embeddings
self.config.vocab_size = model_embeds.num_embeddings
self.vocab_size = model_embeds.num_embeddings
return model_embeds
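# A minimal sketch, with hypothetical sizes, of the static mask helper above:
# decoding 3 query positions against a 5-slot static cache with cache_position
# [0, 1, 2] leaves row i able to attend to slots 0..i; every other slot is
# filled with the dtype minimum.
_demo_mask = NewTaskModelForNewTask._prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask=torch.ones(1, 5),
    sequence_length=3,
    target_length=5,
    dtype=torch.float32,
    cache_position=torch.arange(3),
    batch_size=1,
)
assert _demo_mask.shape == (1, 1, 3, 5)
assert (_demo_mask[0, 0] == 0).sum() == 6  # rows 0..2 keep 1, 2 and 3 visible slots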
|
@auto_docstring(custom_intro='\n    The Base NewTaskModel model which consists of a vision backbone and a language model without language modeling head.\n    ')
class NewTaskModelForNewTask(NewTaskModelPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def set_decoder(self, decoder):
pass
def get_decoder(self):
pass
def get_image_features(self, pixel_values):
pass
@property
def language_model(self):
pass
@property
def vision_tower(self):
pass
@property
def multi_modal_projector(self):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: torch.LongTensor=None, pixel_values: torch.FloatTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]]=None, token_type_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, num_logits_to_keep: int=0) -> Union[tuple, NewTaskModelCausalLMOutputWithPast]:
'''
Returns:
'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, cache_position=None, position_ids=None, pixel_values=None, attention_mask=None, token_type_ids=None, use_cache=True, logits_to_keep=None, labels=None, **kwargs):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
'''
pass
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of=None, mean_resizing=True) -> nn.Embedding:
pass
| 21 | 2 | 20 | 2 | 14 | 4 | 3 | 0.3 | 2 | 6 | 2 | 0 | 12 | 9 | 12 | 13 | 256 | 36 | 174 | 85 | 118 | 52 | 90 | 42 | 77 | 13 | 2 | 2 | 30
|
43
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_new_task_model.py
|
modeling_new_task_model.NewTaskModelMultiModalProjector
|
from .configuration_new_task_model import NewTaskModelConfig
from torch import nn
class NewTaskModelMultiModalProjector(nn.Module):
def __init__(self, config: NewTaskModelConfig):
super().__init__()
self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)
def forward(self, image_features):
hidden_states = self.linear(image_features)
return hidden_states
|
class NewTaskModelMultiModalProjector(nn.Module):
def __init__(self, config: NewTaskModelConfig):
pass
def forward(self, image_features):
pass
| 3 | 0 | 4 | 1 | 3 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 12 | 9 | 2 | 7 | 5 | 4 | 0 | 7 | 5 | 4 | 1 | 1 | 0 | 2
|
44
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_new_task_model.py
|
modeling_new_task_model.NewTaskModelPreTrainedModel
|
from .configuration_new_task_model import NewTaskModelConfig
from ...utils import ModelOutput, auto_docstring, can_return_tuple
from torch import nn
from ...modeling_utils import PreTrainedModel
@auto_docstring
class NewTaskModelPreTrainedModel(PreTrainedModel):
config: NewTaskModelConfig
base_model_prefix = ''
supports_gradient_checkpointing = True
_no_split_modules = ['NewTaskModelMultiModalProjector']
_skip_keys_device_placement = 'past_key_values'
_can_compile_fullgraph = False
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
def _init_weights(self, module):
std = getattr(self.config, 'initializer_range', self.config.get_text_config().initializer_range)
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
|
@auto_docstring
class NewTaskModelPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3 | 0 | 20 | 2 | 16 | 2 | 7 | 0.07 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 32 | 3 | 27 | 13 | 25 | 2 | 22 | 13 | 20 | 7 | 1 | 2 | 7
|
45
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaAttention
|
import torch.nn as nn
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
from typing import Optional, Union
from ...utils.deprecation import deprecate_kwarg
class RobertaAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__()
self.self = ROBERTA_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type, layer_idx=layer_idx)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
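# A minimal sketch, with hypothetical sizes, of the index bookkeeping behind
# prune_heads above: pruning heads {0, 5} out of 12 heads of size 64 keeps
# 10 * 64 = 640 weight rows, which is what prune_linear_layer slices out.
_heads, _index = find_pruneable_heads_and_indices({0, 5}, 12, 64, set())
assert len(_index) == 640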
|
class RobertaAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
pass
def prune_heads(self, heads):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
| 5 | 0 | 15 | 1 | 14 | 1 | 1 | 0.07 | 1 | 5 | 1 | 0 | 3 | 3 | 3 | 13 | 49 | 4 | 43 | 20 | 30 | 3 | 22 | 11 | 18 | 2 | 1 | 1 | 4
|
46
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaEmbeddings
|
import torch.nn as nn
from typing import Optional, Union
import torch
class RobertaEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
self.pad_token_id = config.pad_token_id
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
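# A minimal sketch, with hypothetical numbers, of the buffer slicing in forward:
# after 3 cached tokens, a 2-token decoding step reads positions [3, 4] from the
# registered position_ids buffer.
_position_ids = torch.arange(512).expand((1, -1))
_past, _seq = 3, 2
assert torch.equal(_position_ids[:, _past:_seq + _past], torch.tensor([[3, 4]]))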
|
class RobertaEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
pass
| 3 | 1 | 31 | 3 | 25 | 3 | 4 | 0.14 | 1 | 3 | 0 | 0 | 2 | 7 | 2 | 12 | 65 | 8 | 50 | 24 | 40 | 7 | 35 | 17 | 32 | 7 | 1 | 2 | 8
|
47
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaEncoder
|
from typing import Optional, Union
import torch.nn as nn
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions
import torch
class RobertaEncoder(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and self.config.is_decoder and (past_key_values is None):
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and self.config.is_decoder and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
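# A minimal sketch, assuming the cache_utils API imported above, of the cache object
# the decoder path builds: two DynamicCaches (self- and cross-attention) wrapped in
# an EncoderDecoderCache, which reports length 0 before any layer updates it.
_cache = EncoderDecoderCache(DynamicCache(), DynamicCache())
assert _cache.get_seq_length() == 0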
|
class RobertaEncoder(nn.Module):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
pass
| 3 | 0 | 45 | 4 | 41 | 0 | 9 | 0 | 1 | 7 | 1 | 0 | 2 | 3 | 2 | 12 | 91 | 8 | 83 | 26 | 68 | 0 | 35 | 14 | 32 | 17 | 1 | 3 | 18
|
48
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaIntermediate
|
from ...activations import ACT2FN
import torch.nn as nn
import torch
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class RobertaIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3
|
49
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaLayer
|
from typing import Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils.deprecation import deprecate_kwarg
from ...modeling_layers import GradientCheckpointingLayer
import torch.nn as nn
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
class RobertaLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = RobertaAttention(config, position_embedding_type='absolute', layer_idx=layer_idx)
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, past_key_values=past_key_values, cache_position=cache_position)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
cross_attention_outputs = self.crossattention(attention_output, attention_mask=encoder_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class RobertaLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 5 | 0 | 27 | 2 | 23 | 2 | 4 | 0.1 | 1 | 7 | 3 | 0 | 3 | 8 | 3 | 13 | 84 | 9 | 70 | 32 | 57 | 7 | 41 | 23 | 37 | 7 | 1 | 2 | 11
|
50
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaModel
|
import torch
import torch.nn as nn
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions
from ...utils import auto_docstring, logging
@auto_docstring(custom_intro='\n    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n    cross-attention is added between the self-attention layers, following the architecture described in [Attention is\n    all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set\n    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and\n    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.\n    ')
class RobertaModel(RobertaPreTrainedModel):
_no_split_modules = ['RobertaEmbeddings', 'RobertaLayer']
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
self.attn_implementation = config._attn_implementation
self.position_embedding_type = config.position_embedding_type
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[list[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[-2] if not isinstance(past_key_values, Cache) else past_key_values.get_seq_length()
if token_type_ids is None:
if hasattr(self.embeddings, 'token_type_ids'):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
use_sdpa_attention_masks = self.attn_implementation == 'sdpa' and self.position_embedding_type == 'absolute' and (head_mask is None) and (not output_attentions)
if use_sdpa_attention_masks and attention_mask.dim() == 2:
if self.config.is_decoder:
extended_attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, embedding_output, past_key_values_length)
else:
extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, embedding_output.dtype, tgt_len=seq_length)
else:
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
if use_sdpa_attention_masks and encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, embedding_output.dtype, tgt_len=seq_length)
else:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
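# A minimal sketch, with hypothetical sizes, of the SDPA mask expansion used above:
# a 2D (batch, seq) mask with one padded slot becomes a 4D additive mask, while an
# all-ones mask would instead return None (which is why the fast path is worth taking).
_mask_2d = torch.tensor([[1, 1, 0]])
_mask_4d = _prepare_4d_attention_mask_for_sdpa(_mask_2d, torch.float32, tgt_len=3)
assert _mask_4d.shape == (1, 1, 3, 3)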
| null | 8 | 2 | 37 | 4 | 25 | 8 | 5 | 0.35 | 1 | 7 | 3 | 0 | 5 | 6 | 5 | 6 | 211 | 29 | 135 | 45 | 108 | 47 | 65 | 29 | 59 | 21 | 2 | 2 | 27
|
51
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaPooler
|
import torch
import torch.nn as nn
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class RobertaPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 5 | 1 | 1 | 0.2 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 10 | 7 | 7 | 2 | 10 | 7 | 7 | 1 | 1 | 0 | 2
|
52
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaPreTrainedModel
|
import torch.nn as nn
from ...modeling_utils import PreTrainedModel
from .configuration_roberta import RobertaConfig
from ...utils import auto_docstring, logging
@auto_docstring
class RobertaPreTrainedModel(PreTrainedModel):
config: RobertaConfig
base_model_prefix = 'roberta'
supports_gradient_checkpointing = True
_supports_sdpa = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, RobertaLMPredictionHead):
module.bias.data.zero_()
|
@auto_docstring
class RobertaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 15 | 0 | 12 | 3 | 6 | 0.39 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 27 | 2 | 18 | 7 | 16 | 7 | 16 | 7 | 14 | 6 | 1 | 2 | 6
|
53
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaSdpaSelfAttention
|
from typing import Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
import torch.nn as nn
from ...utils.deprecation import deprecate_kwarg
class RobertaSdpaSelfAttention(RobertaSelfAttention):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__(config, position_embedding_type=position_embedding_type, layer_idx=layer_idx)
self.dropout_prob = config.attention_probs_dropout_prob
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
if self.position_embedding_type != 'absolute' or output_attentions or head_mask is not None:
logger.warning_once('RobertaSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
return super().forward(hidden_states, attention_mask, head_mask, encoder_hidden_states, past_key_values, output_attentions, cache_position)
bsz, tgt_len, _ = hidden_states.size()
query_layer = self.query(hidden_states).view(bsz, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
is_updated = False
is_cross_attention = encoder_hidden_states is not None
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_layer = curr_past_key_value.layers[self.layer_idx].keys
value_layer = curr_past_key_value.layers[self.layer_idx].values
else:
key_layer = self.key(current_states).view(bsz, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(current_states).view(bsz, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_layer, value_layer = curr_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
is_causal = self.is_decoder and (not is_cross_attention) and (attention_mask is None) and (tgt_len > 1)
attn_output = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=self.dropout_prob if self.training else 0.0, is_causal=is_causal)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, self.all_head_size)
return (attn_output, None)
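# A minimal sketch, with hypothetical shapes, of what is_causal=True stands in for
# above: with no explicit mask and tgt_len > 1 it matches a lower-triangular
# boolean mask passed as attn_mask.
_q = torch.randn(1, 2, 4, 8)
_k, _v = torch.randn(1, 2, 4, 8), torch.randn(1, 2, 4, 8)
_causal = torch.nn.functional.scaled_dot_product_attention(_q, _k, _v, is_causal=True)
_tril = torch.tril(torch.ones(4, 4, dtype=torch.bool))
_masked = torch.nn.functional.scaled_dot_product_attention(_q, _k, _v, attn_mask=_tril)
assert torch.allclose(_causal, _masked, atol=1e-06)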
|
class RobertaSdpaSelfAttention(RobertaSelfAttention):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
| 4 | 0 | 48 | 6 | 34 | 9 | 6 | 0.28 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 15 | 99 | 12 | 68 | 22 | 56 | 19 | 35 | 13 | 32 | 11 | 2 | 2 | 12
|
54
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_roberta.py
|
modeling_roberta.RobertaSelfAttention
|
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
import math
import torch.nn as nn
from ...utils.deprecation import deprecate_kwarg
import torch
class RobertaSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.layer_idx = layer_idx
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states)
query_layer = query_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
is_updated = False
is_cross_attention = encoder_hidden_states is not None
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_layer = curr_past_key_value.layers[self.layer_idx].keys
value_layer = curr_past_key_value.layers[self.layer_idx].values
else:
key_layer = self.key(current_states)
key_layer = key_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(current_states)
value_layer = value_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_layer, value_layer = curr_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
query_length, key_length = (query_layer.shape[2], key_layer.shape[2])
if past_key_values is not None:
position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1)
else:
position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
return (context_layer, attention_probs)
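# A minimal sketch, with hypothetical shapes (batch=2, heads=4, seq=7, head_dim=16),
# of the eager score path above: softmax(Q @ K^T / sqrt(d)) @ V followed by the same
# head merge used at the end of forward.
_q = torch.randn(2, 4, 7, 16)
_k, _v = torch.randn(2, 4, 7, 16), torch.randn(2, 4, 7, 16)
_scores = torch.matmul(_q, _k.transpose(-1, -2)) / math.sqrt(16)
_probs = nn.functional.softmax(_scores, dim=-1)
_ctx = torch.matmul(_probs, _v).permute(0, 2, 1, 3).contiguous()
_merged = _ctx.view(2, 7, 4 * 16)  # (batch, seq, all_head_size)
assert _merged.shape == (2, 7, 64)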
|
class RobertaSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
| 4 | 0 | 43 | 7 | 31 | 6 | 6 | 0.19 | 1 | 5 | 0 | 1 | 3 | 11 | 3 | 13 | 132 | 22 | 93 | 44 | 80 | 18 | 72 | 35 | 68 | 13 | 1 | 2 | 17
|
55
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_super.py
|
modeling_super.SuperAttention
|
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring
import torch
from ...utils.deprecation import deprecate_kwarg
from torch import nn
from ...cache_utils import Cache
from .configuration_super import SuperConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
class SuperAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: SuperConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
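# A minimal sketch, with hypothetical config values, of the grouped-query sizing
# above: 8 query heads sharing 2 key/value heads of size 64 give
# num_key_value_groups = 4, so k_proj/v_proj project to 128 features while q_proj
# projects to 512.
_n_heads, _n_kv_heads, _head_dim = 8, 2, 64
assert _n_heads // _n_kv_heads == 4
assert (_n_heads * _head_dim, _n_kv_heads * _head_dim) == (512, 128)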
|
class SuperAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: SuperConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4 | 1 | 35 | 4 | 31 | 1 | 3 | 0.03 | 1 | 3 | 0 | 0 | 2 | 11 | 2 | 12 | 74 | 9 | 63 | 31 | 52 | 2 | 34 | 23 | 31 | 5 | 1 | 2 | 6
|
56
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_super.py
|
modeling_super.SuperDecoderLayer
|
from ...modeling_layers import GradientCheckpointingLayer
from ...cache_utils import Cache
from ...utils import TransformersKwargs, auto_docstring
import torch
from typing import Callable, Optional, Union
from .configuration_super import SuperConfig
from ...processing_utils import Unpack
from ...utils.deprecation import deprecate_kwarg
class SuperDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SuperConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = SuperAttention(config=config, layer_idx=layer_idx)
self.mlp = SuperMLP(config)
self.input_layernorm = SuperRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = SuperRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class SuperDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SuperConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4 | 0 | 25 | 4 | 21 | 2 | 2 | 0.07 | 1 | 7 | 3 | 0 | 2 | 5 | 2 | 12 | 52 | 8 | 42 | 22 | 28 | 3 | 21 | 11 | 18 | 2 | 1 | 1 | 3
|
57
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_super.py
|
modeling_super.SuperMLP
|
from torch import nn
from ...activations import ACT2FN
class SuperMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
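# A minimal sketch exercising the gated (SwiGLU-style) block down(act(gate(x)) * up(x))
# with a hypothetical config namespace; only the attributes read in __init__ are set.
import torch
from types import SimpleNamespace
_cfg = SimpleNamespace(hidden_size=16, intermediate_size=32, mlp_bias=False, hidden_act='silu')
assert SuperMLP(_cfg)(torch.randn(2, 5, 16)).shape == (2, 5, 16)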
|
class SuperMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 7 | 2 | 12 | 14 | 1 | 13 | 11 | 10 | 0 | 13 | 11 | 10 | 1 | 1 | 0 | 2
|
58
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_super.py
|
modeling_super.SuperModel
|
from torch import nn
from ...cache_utils import Cache
from typing import Callable, Optional, Union
from transformers.modeling_outputs import CausalLMOutputWithPast
from ...utils.generic import check_model_inputs
from ...utils import TransformersKwargs, auto_docstring
from .configuration_super import SuperConfig
import torch
@auto_docstring
class SuperModel(SuperPreTrainedModel):
def __init__(self, config: SuperConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([SuperDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = SuperRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = SuperRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithPast]:
out = super().forward(input_ids, attention_mask, position_ids, past_key_values, inputs_embeds, use_cache, output_attentions, output_hidden_states, return_dict, cache_position)
out.logits *= 2 ** 4  # modular-example override: scale the parent forward's logits by 2**4 = 16
return out
|
@auto_docstring
class SuperModel(SuperPreTrainedModel):
def __init__(self, config: SuperConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithPast]:
pass
| 6 | 0 | 27 | 2 | 21 | 5 | 3 | 0.29 | 1 | 8 | 3 | 0 | 5 | 7 | 6 | 7 | 179 | 17 | 126 | 56 | 89 | 37 | 55 | 26 | 48 | 9 | 2 | 2 | 17
|
59
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_super.py
|
modeling_super.SuperPreTrainedModel
|
from ...utils import TransformersKwargs, auto_docstring
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_super import SuperConfig
@auto_docstring
class SuperPreTrainedModel(PreTrainedModel):
config: SuperConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['SuperDecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': SuperDecoderLayer, 'attentions': SuperAttention}
|
@auto_docstring
class SuperPreTrainedModel(PreTrainedModel):
pass
| 2 | 0 | 10 | 0 | 10 | 0 | 5 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 23 | 1 | 22 | 14 | 20 | 0 | 21 | 14 | 19 | 5 | 1 | 2 | 5
|
60
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_super.py
|
modeling_super.SuperRMSNorm
|
from ...integrations import use_kernel_forward_from_hub
import torch
from torch import nn
@use_kernel_forward_from_hub('RMSNorm')
class SuperRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
SuperRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
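# A minimal sketch, with hypothetical shapes, checking forward against the textbook
# RMSNorm formula x * rsqrt(mean(x**2) + eps) * weight.
_norm = SuperRMSNorm(hidden_size=8, eps=1e-06)
_x = torch.randn(2, 4, 8)
_manual = _x * torch.rsqrt(_x.pow(2).mean(-1, keepdim=True) + 1e-06) * _norm.weight
assert torch.allclose(_norm(_x), _manual, atol=1e-05)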
|
@use_kernel_forward_from_hub('RMSNorm')
class SuperRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
SuperRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 5 | 1 | 5 | 0 | 4 | 1 | 1 | 0.23 | 1 | 2 | 0 | 0 | 3 | 2 | 3 | 13 | 18 | 2 | 13 | 8 | 9 | 3 | 13 | 8 | 9 | 1 | 1 | 0 | 3
|
61
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_super.py
|
modeling_super.SuperRotaryEmbedding
|
from .configuration_super import SuperConfig
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
from torch import nn
class SuperRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: SuperConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
|
class SuperRotaryEmbedding(nn.Module):
def __init__(self, config: SuperConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5 | 0 | 18 | 2 | 13 | 5 | 3 | 0.35 | 1 | 3 | 0 | 0 | 3 | 7 | 3 | 13 | 59 | 8 | 40 | 21 | 35 | 14 | 38 | 20 | 34 | 3 | 1 | 1 | 8
|
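The buffer registered above stores only the inverse frequencies; the cos/sin tables are rebuilt from them on each forward pass. A sketch of that construction under the standard RoPE recipe (head_dim, base, and sequence length here are illustrative):

import torch

head_dim, base = 8, 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))  # (head_dim / 2,)
position_ids = torch.arange(6).float()                                        # (seq_len,)
freqs = torch.outer(position_ids, inv_freq)  # (seq_len, head_dim / 2); the matmul in forward() computes the same
emb = torch.cat((freqs, freqs), dim=-1)      # duplicate the halves, as forward() does
cos, sin = emb.cos(), emb.sin()              # what apply_rotary_pos_emb consumes (before attention_scaling)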
62 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modeling_switch_function.py | modeling_switch_function.SwitchFunctionAttention |
from ...utils.deprecation import deprecate_kwarg
from ...processing_utils import Unpack
from ...utils import TransformersKwargs
import torch
from .configuration_switch_function import SwitchFunctionConfig
from ...cache_utils import Cache
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from typing import Callable, Optional
class SwitchFunctionAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: SwitchFunctionConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class SwitchFunctionAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: SwitchFunctionConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4 | 1 | 35 | 4 | 31 | 1 | 3 | 0.03 | 1 | 3 | 0 | 0 | 2 | 11 | 2 | 12 | 74 | 9 | 63 | 31 | 52 | 2 | 34 | 23 | 31 | 5 | 1 | 2 | 6
|
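The `num_key_value_groups` attribute above is what makes this grouped-query attention: several query heads share one key/value head. A sketch of the head arithmetic, mirroring the usual repeat_kv expansion in transformers; all sizes are illustrative.

import torch

batch, seq, head_dim = 2, 5, 64
num_attention_heads, num_key_value_heads = 8, 2
groups = num_attention_heads // num_key_value_heads  # 4 query heads per KV head

k = torch.randn(batch, num_key_value_heads, seq, head_dim)
k = k[:, :, None].expand(batch, num_key_value_heads, groups, seq, head_dim)
k = k.reshape(batch, num_key_value_heads * groups, seq, head_dim)
assert k.shape[1] == num_attention_heads  # KV heads broadcast up to match the query heads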
63 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/utils/models_to_deprecate.py | models_to_deprecate.HubModelLister |
class HubModelLister:
"""
Utility for getting models from the hub based on tags. Handles errors without crashing the script.
"""
def __init__(self, tags):
self.tags = tags
self.model_list = api.list_models(tags=tags)
def __iter__(self):
try:
yield from self.model_list
except Exception as e:
print(f'Error: {e}')
return
|
class HubModelLister:
'''
Utility for getting models from the hub based on tags. Handles errors without crashing the script.
'''
def __init__(self, tags):
pass
def __iter__(self):
pass
| 3 | 1 | 5 | 0 | 5 | 0 | 2 | 0.3 | 0 | 1 | 0 | 0 | 2 | 2 | 2 | 2 | 15 | 2 | 10 | 6 | 7 | 3 | 10 | 5 | 7 | 2 | 0 | 1 | 3
|
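A hedged usage sketch: the class depends on a module-level `api` object, which in this script is a huggingface_hub HfApi instance; the tag below is illustrative.

from huggingface_hub import HfApi

api = HfApi()
for model in HubModelLister(tags="bert"):
    print(model.id)  # iteration swallows hub errors instead of crashing the script
    break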
64 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_add_function.py | modular_add_function.TestAttention |
from transformers.models.zamba.modeling_zamba import ZambaAttention
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
class TestAttention(ZambaAttention):
def __init__(self):
pass
def forward(self):
_ = apply_rotary_pos_emb(1, 1, 1, 1)
|
class TestAttention(ZambaAttention):
def __init__(self):
pass
def forward(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 14 | 6 | 1 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 2 | 0 | 2
|
65 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_dummy_bert.py | modular_dummy_bert.DummyBertModel |
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
import torch
from transformers.models.bert.modeling_bert import BertModel
class DummyBertModel(BertModel):
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[list[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
return super().forward(input_ids)
|
class DummyBertModel(BertModel):
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[list[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
pass
| 2 | 0 | 17 | 0 | 17 | 0 | 1 | 0 | 1 | 3 | 0 | 0 | 1 | 0 | 1 | 7 | 18 | 0 | 18 | 17 | 1 | 0 | 3 | 2 | 1 | 1 | 3 | 0 | 1
|
66 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_from_uppercase_model.py | modular_from_uppercase_model.FromUppercaseModelEncoderLayer |
from transformers.models.clip.modeling_clip import CLIPEncoderLayer
class FromUppercaseModelEncoderLayer(CLIPEncoderLayer):
pass
|
class FromUppercaseModelEncoderLayer(CLIPEncoderLayer):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0
|
67 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/utils/modular_model_converter.py | modular_model_converter.ClassDependencyMapper |
from libcst import ClassDef, CSTVisitor
from typing import Optional, Union
class ClassDependencyMapper(CSTVisitor):
"""A visitor which is designed to analyze a single class node to get all its dependencies that are shared with the set of
`global_names`.
"""
def __init__(self, class_name: str, global_names: set[str], objects_imported_from_modeling: Optional[set[str]]=None):
super().__init__()
self.class_name = class_name
self.dependencies = set()
self.global_names = global_names
self.objects_imported_from_modeling = set() if objects_imported_from_modeling is None else objects_imported_from_modeling
def visit_Name(self, node):
if node.value != self.class_name and node.value in self.global_names and (node.value not in self.objects_imported_from_modeling):
self.dependencies.add(node.value)
|
class ClassDependencyMapper(CSTVisitor):
'''A visitor which is designed to analyze a single class node to get all its dependencies that are shared with the set of
`global_names`.
'''
def __init__(self, class_name: str, global_names: set[str], objects_imported_from_modeling: Optional[set[str]]=None):
pass
def visit_Name(self, node):
pass
| 3 | 1 | 9 | 0 | 9 | 0 | 2 | 0.17 | 1 | 3 | 0 | 0 | 2 | 4 | 2 | 2 | 23 | 2 | 18 | 9 | 13 | 3 | 10 | 7 | 7 | 2 | 1 | 1 | 4
|
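A sketch of how such a visitor is driven with libcst; the toy class body and the `global_names` set are illustrative.

import libcst as cst

code = "class Foo(Bar):\n    x = helper()\n"
class_node = cst.parse_module(code).body[0]
mapper = ClassDependencyMapper("Foo", global_names={"Bar", "helper", "unused"})
class_node.visit(mapper)
print(mapper.dependencies)  # {'Bar', 'helper'}: 'unused' is never referenced and 'Foo' itself is excluded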
68 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/utils/modular_model_converter.py | modular_model_converter.ModelFileMapper |
from libcst.metadata import MetadataWrapper, ParentNodeProvider, PositionProvider, ScopeProvider
import libcst as cst
import re
class ModelFileMapper(ModuleMapper):
"""A mapper designed to parse modeling files (like `modeling_llama.py`). When encountering such a file
in the `modular_xxx.py` file, we need to correctly visit it and merge the dependencies of the modular and current file.
For this reason, this class should only be instantiated from the class method `visit_and_merge_dependencies`, which takes
care of correctly merging dependencies, then finalizes all dependency graph computations.
Note that we only merge functions and assignments here, as classes will be treated later on as they may be modified.
For example, if you redefine `apply_rotary_pos_emb()` in the modular, the new node should be used in the dependencies
of the modeling files as well.
"""
def __init__(self, python_module: cst.Module):
super().__init__(python_module)
def compute_relative_order(self, missing_dependencies: set[str]) -> dict[str, int]:
"""Compute in which relative order the `missing_dependencies` should appear when the nodes are added to the final file that
will be created based on the modular.
"""
relative_order = {}
idx = 0
classes = sorted([dep for dep in tuple(missing_dependencies) if dep in self.classes], key=lambda x: self.start_lines[x])
if len(classes) > 0 and (not hasattr(self, 'class_dependency_mapping')):
raise ValueError('Cannot correctly find the relative order of the dependencies.')
remaining_dependencies = missing_dependencies.copy()
for class_name in classes:
class_dependencies = tuple(self.class_dependency_mapping[class_name] & remaining_dependencies)
original_dependencies = []
merged_dependencies = []
for class_dep in class_dependencies:
if class_dep in self.start_lines:
original_dependencies.append(class_dep)
else:
merged_dependencies.append(class_dep)
original_dependencies = sorted(original_dependencies, reverse=True)
original_dependencies = sorted(original_dependencies, key=lambda x: self.start_lines.get(x, 10000000000.0))
merged_dependencies = sorted(merged_dependencies, key=lambda x: self.modular_file_start_lines[x])
for dep in original_dependencies + merged_dependencies:
remaining_dependencies.remove(dep)
relative_order[dep] = idx
idx += 1
if class_name in remaining_dependencies:
remaining_dependencies.remove(class_name)
relative_order[class_name] = idx
idx += 1
remaining_dependencies = tuple(remaining_dependencies)
original_dependencies = []
merged_dependencies = []
for dep in remaining_dependencies:
if dep in self.modular_file_start_lines:
merged_dependencies.append(dep)
else:
original_dependencies.append(dep)
original_dependencies = sorted(original_dependencies, reverse=True)
original_dependencies = sorted(original_dependencies, key=lambda x: self.start_lines.get(x, 10000000000.0))
merged_dependencies = sorted(merged_dependencies, key=lambda x: self.modular_file_start_lines[x])
for dep in original_dependencies + merged_dependencies:
relative_order[dep] = idx
idx += 1
return relative_order
def _merge_functions(self, functions: dict[str, cst.CSTNode], object_mapping: dict[str, set]):
"""Update the global nodes and function dependency mapping with those from the modular file.
Merging rule: if any function with the same name was redefined in the modular, use it and its dependencies
instead of the original ones (this may mean to add new functions as well, if any redefined function uses a new one).
"""
self.functions.update(functions)
self.object_dependency_mapping.update({obj: dep for obj, dep in object_mapping.items() if obj in functions})
self.global_nodes.update(self.functions)
def _merge_assignments(self, assignments: dict[str, cst.CSTNode], object_mapping: dict[str, set]):
"""Update the global nodes with the assignment from the modular file.
Merging rule: if any assignment with the same name was redefined in the modular, we use it and its dependencies ONLY if it matches
a pattern in `ASSIGNMENTS_REGEX_TO_KEEP_IF_NOT_NONE` and its value is not None, or if it matches a pattern in `ASSIGNMENTS_REGEX_TO_KEEP`.
Otherwise, we use the original value and dependencies. This rule was chosen to avoid having to rewrite the big docstrings.
"""
for assignment, node in assignments.items():
should_keep = any((re.search(pattern, assignment) for pattern in ASSIGNMENTS_REGEX_TO_KEEP))
should_keep_if_not_none = any((re.search(pattern, assignment) for pattern in ASSIGNMENTS_REGEX_TO_KEEP_IF_NOT_NONE)) and (not (hasattr(node.body[0].value, 'value') and node.body[0].value.value == 'None'))
if should_keep or should_keep_if_not_none or assignment not in self.assignments:
self.assignments[assignment] = node
if assignment in object_mapping:
self.object_dependency_mapping[assignment] = object_mapping[assignment]
self.global_nodes.update(self.assignments)
def _merge_classes(self, classes: dict[str, cst.CSTNode]):
"""Update the global nodes with the new classes from the modular (i.e. classes which do not exist in current file, and
are not imported). We do NOT update any dependency mapping here. This is because we only need the names of newly defined
classes in the modular to be discoverable when computing dependencies for new nodes later on. For this reason, we
do not add the new classes to `self.classes`, but only to `global_nodes`.
"""
self.global_nodes.update({name: node for name, node in classes.items() if name not in self.classes and name not in self.objects_imported_from_modeling})
def merge_modular_dependencies(self, classes, functions, assignments, object_mapping, start_lines):
"""Merge classes, functions and assignments from the modular definitions into the current module file,
then record the relative order of all nodes.
Note: This function takes care of updating `global_nodes` and `object_recursive_dependency_mapping` as well after the
merge with other files dependencies.
"""
self._merge_functions(functions, object_mapping)
self._merge_assignments(assignments, object_mapping)
self._merge_classes(classes)
self.modular_file_start_lines = start_lines
self._restrict_dependencies_to_known_entities()
self.object_recursive_dependency_mapping = self._compute_recursive_object_dependencies()
@classmethod
def visit_and_merge_dependencies(cls, module: cst.Module, classes, functions, assignments, object_mapping, start_lines) -> 'ModelFileMapper':
wrapper = MetadataWrapper(module)
mapper = cls(module)
wrapper.visit(mapper)
mapper.merge_modular_dependencies(classes, functions, assignments, object_mapping, start_lines)
mapper.compute_class_dependencies()
return mapper
|
class ModelFileMapper(ModuleMapper):
'''A mapper designed to parse modeling files (like `modeling_llama.py`). When encountering such a file
in the `modular_xxx.py` file, we need to correctly visit it and merge the dependencies of the modular and current file.
For this reason, this class should only be instantiated from the class method `visit_and_merge_dependencies`, which takes
care of correctly merging dependencies, then finalizes all dependency graph computations.
Note that we only merge functions and assignments here, as classes will be treated later on as they may be modified.
For example, if you redefine `apply_rotary_pos_emb()` in the modular, the new node should be used in the dependencies
of the modeling files as well.
'''
def __init__(self, python_module: cst.Module):
pass
def compute_relative_order(self, missing_dependencies: set[str]) -> dict[str, int]:
'''Compute in which relative order the `missing_dependencies` should appear when the nodes are added to the final file that
will be created based on the modular.
'''
pass
def _merge_functions(self, functions: dict[str, cst.CSTNode], object_mapping: dict[str, set]):
'''Update the global nodes and function dependency mapping with those from the modular file.
Merging rule: if any function with the same name was redefined in the modular, use it and its dependencies
instead of the original ones (this may mean to add new functions as well, if any redefined function uses a new one).
'''
pass
def _merge_assignments(self, assignments: dict[str, cst.CSTNode], object_mapping: dict[str, set]):
'''Update the global nodes with the assignment from the modular file.
Merging rule: if any assignment with the same name was redefined in the modular, we use it and its dependencies ONLY if it matches
a pattern in `ASSIGNMENTS_REGEX_TO_KEEP_IF_NOT_NONE` and its value is not None, or if it matches a pattern in `ASSIGNMENTS_REGEX_TO_KEEP`.
Otherwise, we use the original value and dependencies. This rule was chosen to avoid having to rewrite the big docstrings.
'''
pass
def _merge_classes(self, classes: dict[str, cst.CSTNode]):
'''Update the global nodes with the new classes from the modular (i.e. classes which do not exist in current file, and
are not imported). We do NOT update any dependency mapping here. This is because we only need the names of newly defined
classes in the modular to be discoverable when computing dependencies for new nodes later on. For this reason, we
do not add the new classes to `self.classes`, but only to `global_nodes`.
'''
pass
def merge_modular_dependencies(self, classes, functions, assignments, object_mapping, start_lines):
'''Merge classes, functions and assignments from the modular definitions into the current module file,
then record the relative order of all nodes.
Note: This function takes care of updating `global_nodes` and `object_recursive_dependency_mapping` as well after the
merge with other files dependencies.
'''
pass
@classmethod
def visit_and_merge_dependencies(cls, module: cst.Module, classes, functions, assignments, object_mapping, start_lines) -> 'ModelFileMapper':
pass
| 9 | 6 | 20 | 1 | 12 | 7 | 3 | 0.63 | 1 | 7 | 0 | 0 | 6 | 2 | 7 | 42 | 157 | 17 | 86 | 26 | 75 | 54 | 71 | 23 | 63 | 10 | 5 | 3 | 19
|
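The rule in `_merge_assignments` above is easiest to see in isolation. A sketch with illustrative stand-ins for the module-level `ASSIGNMENTS_REGEX_TO_KEEP` and `ASSIGNMENTS_REGEX_TO_KEEP_IF_NOT_NONE` constants (the real patterns live elsewhere in this file):

import re

KEEP = [r"_CHECKPOINT", r"_EXPECTED"]  # illustrative stand-in patterns
KEEP_IF_NOT_NONE = [r"_DOCSTRING"]

def use_modular_value(name: str, value: str) -> bool:
    if any(re.search(p, name) for p in KEEP):
        return True  # always prefer the modular redefinition
    if any(re.search(p, name) for p in KEEP_IF_NOT_NONE) and value != "None":
        return True  # prefer it only when it is not None
    return False     # otherwise keep the original (e.g. the big docstrings)

assert use_modular_value("_CHECKPOINT_FOR_DOC", "None")
assert not use_modular_value("GEMMA_INPUT_DOCSTRING", "None")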
69 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/utils/modular_model_converter.py | modular_model_converter.ModularFileMapper |
from collections import Counter, defaultdict, deque
import libcst as cst
import re
from libcst import matchers as m
class ModularFileMapper(ModuleMapper):
"""This is a Mapper to visit a modular file (like `modular_llama.py`). It visits the whole file, recording dependency,
then visits all imported modeling files (like `modeling_llama.py`), and manages their mutual dependencies.
Calling the method `create_modules()` after visit will create all modules based on this modular file.
"""
def __init__(self, python_module, new_name):
super().__init__(python_module)
self.model_name = new_name
self.model_specific_imported_objects: dict[str, str] = {}
self.model_specific_modules: dict[str, cst.Module] = {}
self.all_all_to_add = {}
def visit_ImportFrom(self, node: cst.ImportFrom) -> None:
"""When visiting imports from modeling files (i.e. `transformers.models.xxx`) we get the code, parse it,
and save it in `self.model_specific_modules` to later visit. The imported objects are saved in `self.model_specific_imported_objects`.
"""
import_module = self.python_module.code_for_node(node.module)
import_statement = '.' * len(node.relative) + import_module
if any((import_to_skip in import_statement for import_to_skip in IMPORTS_TO_SKIP_IN_MODULAR)):
return
if m.matches(node.module, m.Attribute()):
for imported_ in node.names:
_import = re.search(f'(?:transformers\\.models\\.)|(?:\\.\\.\\.models\\.)|(?:\\.\\.)\\w+\\.({self.match_patterns})_.*', import_statement)
if _import:
source = _import.group(1)
if source == 'modeling' and 'Config' in self.python_module.code_for_node(imported_):
raise ValueError(f'You are importing {self.python_module.code_for_node(imported_)} from the modeling file. Import from the `configuration_xxxx.py` file instead')
if import_module not in self.model_specific_modules:
if 'models' not in import_module:
import_module = 'models.' + import_module
if 'transformers' not in import_module:
import_module = 'transformers.' + import_module
source_code = get_module_source_from_name(import_module)
tree = cst.parse_module(source_code)
self.model_specific_modules[import_module] = tree
imported_object = self.python_module.code_for_node(imported_.name)
self.model_specific_imported_objects[imported_object] = import_module
if m.matches(node.module, m.Name()):
if 'transformers' == import_module:
raise ValueError(f'You are importing from {import_module} directly using global imports. Import from the correct local path')
def visit_SimpleStatementLine(self, node):
"""If we visit an import statement not previously visited, record it. If we visit a module-scope assignment,
simply record it or, if it is `__all__`, split it between files where we should dispatch it.
"""
parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
simple_top_level_assign_structure = m.SimpleStatementLine(body=[m.Assign(targets=[m.AssignTarget(target=m.Name())])])
simple_top_level_variable_indexing = m.SimpleStatementLine(body=[m.Assign(targets=[m.AssignTarget(target=m.Subscript(value=m.Name()) | m.Attribute(value=m.Name()))])])
if m.matches(parent_node, m.Module()):
if m.matches(node, m.SimpleStatementLine(body=[m.Import()])):
self.imports.append(node)
elif m.matches(node, m.SimpleStatementLine(body=[m.ImportFrom()])):
import_module = self.python_module.code_for_node(node.body[0].module)
import_statement = '.' * len(node.body[0].relative) + import_module
if not (re.search(f'(?:transformers\\.models\\.)|(?:\\.\\.)\\w+\\.({self.match_patterns})_.*', import_statement) and (not any((import_to_skip in import_statement for import_to_skip in IMPORTS_TO_SKIP_IN_MODULAR)))):
self.imports.append(node)
elif m.matches(node, simple_top_level_assign_structure):
assigned_variable = node.body[0].targets[0].target.value
if assigned_variable == '__all__':
self.all_all_to_add = split_all_assignment(node, self.model_name)
else:
self.current_assignment = assigned_variable
self.assignments[assigned_variable] = node
elif m.matches(node, simple_top_level_variable_indexing):
indexed_variable = node.body[0].targets[0].target.value.value
self.current_assignment = indexed_variable
node_name = self.python_module.code_for_node(node)
self.assignments[node_name] = node
self.object_dependency_mapping[indexed_variable].add(node_name)
def leave_Module(self, node):
"""When we leave the modular file, we do the following in order:
1. for each modeling file found in the imports, rename it with the new model name, visit it, and update
its dependency graph with the new function and assignment definitions found in the modular
2. update the modular dependency graph with the imported functions and assignments (found when visiting the matching files)
3. compute the nested (recursive) function and assignment dependencies
"""
super().leave_Module(node)
self.visited_modules = {}
self.renamers = {}
name_prefixes = self.infer_new_model_name()
for file, module in self.model_specific_modules.items():
file_model_name = file.split('.')[-2]
new_name = name_prefixes[file]
renamer = ReplaceNameTransformer(file_model_name, new_name, self.model_name)
renamed_module = module.visit(renamer)
self.visited_modules[file] = ModelFileMapper.visit_and_merge_dependencies(renamed_module, self.classes, self.functions, self.assignments, self.object_dependency_mapping, self.start_lines)
self.renamers[file] = renamer
self.merge_model_specific_imports(self.visited_modules)
self.object_recursive_dependency_mapping = self._compute_recursive_object_dependencies()
self.imported_objects_per_file = defaultdict(set)
for file, mapper in self.visited_modules.items():
file_type = re.search(f'^transformers\\.models\\.\\w+\\.({self.match_patterns})_.*', file).group(1)
self.imported_objects_per_file[file_type].update(mapper.objects_imported_from_modeling)
def merge_model_specific_imports(self, visited_modules):
"""Merge the functions and assignments imported from the modeling files to the modular nodes and dependency graph,
based on the visited files."""
self.start_lines_file_mapping = {}
self.added_objects_file_mapping = {}
for object_name, file in self.model_specific_imported_objects.items():
visited_module = visited_modules[file]
self.start_lines_file_mapping[file] = visited_module.start_lines
if object_name in visited_module.functions and object_name not in self.functions:
self.functions[object_name] = visited_module.functions[object_name]
self.added_objects_file_mapping[object_name] = file
dependencies = visited_module.object_dependency_mapping.get(object_name, None)
if dependencies is not None:
self.object_dependency_mapping[object_name] = dependencies
for dep in dependencies:
if dep not in self.global_nodes:
self.added_objects_file_mapping[dep] = file
self.functions[dep] = visited_module.global_nodes[dep]
recursive_dependencies = visited_module.object_recursive_dependency_mapping.get(object_name, set())
node_recursive_dependencies_mapping = {dep: visited_module.global_nodes[dep] for dep in recursive_dependencies}
for filename, module_mapper in self.visited_modules.items():
if filename != file:
module_mapper.global_nodes[object_name] = visited_module.functions[object_name]
if len(recursive_dependencies) > 0:
module_mapper.object_recursive_dependency_mapping[object_name] = recursive_dependencies
module_mapper.global_nodes.update(node_recursive_dependencies_mapping)
elif object_name in visited_module.assignments and object_name not in self.assignments:
self.assignments[object_name] = visited_module.assignments[object_name]
self.added_objects_file_mapping[object_name] = file
dependencies = visited_module.object_dependency_mapping.get(object_name, None)
if dependencies is not None:
self.object_dependency_mapping[object_name] = dependencies
for dep in dependencies:
if dep not in self.global_nodes:
self.added_objects_file_mapping[dep] = file
self.assignments[dep] = visited_module.global_nodes[dep]
self.global_nodes = {**self.assignments, **self.classes, **self.functions}
self._restrict_dependencies_to_known_entities()
def compute_relative_order(self, missing_dependencies: set) -> dict[str, int]:
"""Compute in which relative order the `missing_dependencies` should appear when the nodes are added to the final file that
will be created based on the modular.
"""
relative_order = {}
idx = 0
original_dependencies = []
other_files_dependencies = defaultdict(list)
for dep in sorted(missing_dependencies):
if dep in self.added_objects_file_mapping:
file = self.added_objects_file_mapping[dep]
other_files_dependencies[file].append(dep)
else:
original_dependencies.append(dep)
all_dependencies = []
for file, dependencies in other_files_dependencies.items():
sorted_dependencies = sorted(dependencies, key=lambda x: self.start_lines_file_mapping[file][x])
all_dependencies += sorted_dependencies
all_dependencies += sorted(original_dependencies, key=lambda x: self.start_lines[x])
for dep in all_dependencies:
relative_order[dep] = idx
idx += 1
return relative_order
def infer_new_model_name(self) -> dict:
"""Infer whether we are using a model name prefix different from the usual model name as defined from the filename.
This is useful e.g. when we define a new multi-modal model, and only the text part inherits from `LlamaModel`,
so we have something like:
```python
class NewModelNameTextDecoderLayer(LlamaDecoderLayer):
pass
```
with the `Text` prefix added to the model name.
However, in case multiple prefixes are used, we raise a warning and use the most frequent one, to avoid parsing
the same file multiple times and inconsistencies in the objects added from dependencies.
If the new prefix collides with a prefix of another class in the file where we are importing from, then we also
raise a warning, and use the default prefix (model name) to avoid collisions in dependencies.
"""
prefix_model_name_mapping = defaultdict(Counter)
cased_default_name = get_cased_name(self.model_name)
for class_name, class_node in self.classes.items():
modeling_bases = [k.value.value for k in class_node.bases if k.value.value in self.model_specific_imported_objects]
if len(modeling_bases) > 1:
raise ValueError(f'{class_name} was defined with more than 1 model-specific super class. This is unsupported. We found {(*modeling_bases,)}.')
if len(modeling_bases) == 1:
filename = self.model_specific_imported_objects[modeling_bases[0]]
cased_model_name = cased_default_name
suffix = common_partial_suffix(class_name, modeling_bases[0])
if len(suffix) > 0 and suffix[0].isupper():
cased_model_name = class_name.replace(suffix, '')
if len(cased_model_name) < len(cased_default_name) and cased_default_name in class_name:
cased_model_name = cased_default_name
prefix_model_name_mapping[filename].update([cased_model_name])
final_name_mapping = {}
for file, prefixes_counter in prefix_model_name_mapping.items():
if len(prefixes_counter) > 1:
_, total = prefixes_counter.most_common(1)[0]
most_used_entities = [name for name, count in prefixes_counter.most_common() if count == total]
final_name = cased_default_name if cased_default_name in most_used_entities else most_used_entities[-1]
else:
final_name = list(prefixes_counter)[0]
old_cased_model_name = get_cased_name(file.split('.')[-2])
old_model_name_prefix = final_name.replace(cased_default_name, old_cased_model_name)
has_prefix_collision = f'\nclass {old_model_name_prefix}' in get_module_source_from_name(file)
if final_name != cased_default_name and has_prefix_collision:
if len(prefixes_counter) > 1:
logger.warning(f"We detected multiple prefix names when inheriting from {file}: {(*set(prefixes_counter),)}. However, the most used one, '{final_name}', is already present in the source file and will likely cause consistency issues. For this reason we fallback to the default prefix '{cased_default_name}' when grabbing args and dependencies. Make sure to subclass the intermediate classes with the prefix you want (if different from '{cased_default_name}') or use a single prefix in all the modular (best).")
else:
logger.warning(f"We detected the use of the new default prefix {final_name} when inheriting from {file}. However, it is already present in the source file and will likely cause consistency issues. For this reason we fallback to the default prefix '{cased_default_name}' when grabbing args and dependencies. Make sure to subclass the intermediate classes with the prefix you want (if different from '{cased_default_name}')")
final_name = cased_default_name
elif len(prefixes_counter) > 1:
logger.warning(f"We detected multiple prefix names when inheriting from {file}: {(*set(prefixes_counter),)}. We will only use the most used '{final_name}' prefix when grabbing args and dependencies. Make sure to subclass the intermediate classes with the prefix you want (if different from '{final_name}') or use a single prefix in all the modular (best).")
final_name_mapping[file] = get_lowercase_name(final_name)
for file in self.model_specific_modules:
if file not in final_name_mapping:
final_name_mapping[file] = self.model_name
return final_name_mapping
|
class ModularFileMapper(ModuleMapper):
'''This is a Mapper to visit a modular file (like `modular_llama.py`). It visits the whole file, recording dependencies,
then visits all imported modeling files (like `modeling_llama.py`), and manages their mutual dependencies.
Calling the method `create_modules()` after visit will create all modules based on this modular file.
'''
def __init__(self, python_module, new_name):
pass
def visit_ImportFrom(self, node: cst.ImportFrom) -> None:
'''When visiting imports from modeling files (i.e. `transformers.models.xxx`) we get the code, parse it,
and save it in `self.model_specific_modules` to later visit. The imported objects are saved in `self.model_specific_imported_objects`.
'''
pass
def visit_SimpleStatementLine(self, node):
'''If we visit an import statement not previously visited, record it. If we visit a module-scope assignment,
simply record it or, if it is `__all__`, split it between files where we should dispatch it.
'''
pass
def leave_Module(self, node):
'''When we leave the modular file, we do the following in order:
1. for each modeling file found in the imports, rename it with the new model name, visit it, and update
its dependency graph with the new function and assignment definitions found in the modular
2. update the modular dependency graph with the imported functions and assignments (found when visiting the matching files)
3. compute the nested (recursive) function and assignment dependencies
'''
pass
def merge_model_specific_imports(self, visited_modules):
'''Merge the functions and assignments imported from the modeling files to the modular nodes and dependency graph,
based on the visited files.'''
pass
def compute_relative_order(self, missing_dependencies: set) -> dict[str, int]:
'''Compute in which relative order the `missing_dependencies` should appear when the nodes are added to the final file that
will be created based on the modular.
'''
pass
def infer_new_model_name(self) -> dict:
'''Infer whether we are using a model name prefix different from the usual model name as defined from the filename.
This is useful e.g. when we define a new multi-modal model, and only the text part inherits from `LlamaModel`,
so we have something like:
```python
class NewModelNameTextDecoderLayer(LlamaDecoderLayer):
pass
```
with the `Text` prefix added to the model name.
However, in case multiple prefixes are used, we raise a warning and use the most frequent one, to avoid parsing
the same file multiple times and inconsistencies in the objects added from dependencies.
If the new prefix collides with a prefix of another class in the file where we are importing from, then we also
raise a warning, and use the default prefix (model name) to avoid collisions in dependencies.
'''
pass
| 8 | 7 | 39 | 2 | 29 | 8 | 8 | 0.32 | 1 | 11 | 2 | 0 | 7 | 12 | 7 | 42 | 284 | 22 | 202 | 72 | 194 | 64 | 154 | 72 | 146 | 13 | 5 | 5 | 53
|
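The suffix-stripping step inside `infer_new_model_name` is the crux of the prefix detection. A sketch, writing out `common_partial_suffix` under the assumption that it returns the longest common trailing substring of two names:

def common_partial_suffix(a: str, b: str) -> str:
    # longest common trailing substring (assumed behavior of the real helper)
    i = 0
    while i < min(len(a), len(b)) and a[-1 - i] == b[-1 - i]:
        i += 1
    return a[len(a) - i:] if i else ""

class_name, base = "NewModelNameTextDecoderLayer", "LlamaDecoderLayer"
suffix = common_partial_suffix(class_name, base)  # 'DecoderLayer'
prefix = class_name.replace(suffix, "")           # 'NewModelNameText', the detected prefix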
70 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/utils/modular_model_converter.py | modular_model_converter.ModuleMapper |
from libcst.metadata import MetadataWrapper, ParentNodeProvider, PositionProvider, ScopeProvider
import re
from libcst import matchers as m
from libcst import ClassDef, CSTVisitor
from collections import Counter, defaultdict, deque
from abc import ABC, abstractmethod
import libcst as cst
class ModuleMapper(CSTVisitor, ABC):
"""An abstract visitor class which analyses a module, creating a mapping of dependencies for classes, functions and assignments.
Class dependencies are computed with `compute_class_dependencies()`, while function and assignment dependencies are stored in
`self.object_recursive_dependency_mapping` (can be computed by `_compute_recursive_object_dependencies()`).
It defines common visiting patterns (i.e. common visit_xxx/leave_xxx functions) between the modular file and the
modeling files that will be visited.
"""
METADATA_DEPENDENCIES = (ParentNodeProvider, PositionProvider)
def __init__(self, python_module: cst.Module):
self.python_module: cst.Module = python_module
self.classes: dict[str, cst.ClassDef] = {}
self.imports = []
self.functions: dict[str, cst.FunctionDef] = {}
self.object_dependency_mapping = defaultdict(set)
self.assignments: dict[str, cst.SimpleStatementLine] = {}
self.current_function = None
self.current_class = None
self.current_assignment = None
self.objects_imported_from_modeling = set()
self.match_patterns = '|'.join(ALL_FILE_TYPES)
def visit_ImportFrom(self, node):
"""This keeps track of objects imported from neighbor modeling files (e.g. in `modeling_xxx.py, we have
`from .configuration_xxx import Config`, then `Config` should be recorded as it is not a dependency that needs
to be added (because it will be part of the imports)"""
import_module = self.python_module.code_for_node(node.module)
import_statement = '.' * len(node.relative) + import_module
if re.search(f'^\\.({self.match_patterns})_.*', import_statement):
for imported_object in node.names:
if imported_object.evaluated_alias is not None:
self.objects_imported_from_modeling.add(imported_object.evaluated_alias)
else:
self.objects_imported_from_modeling.add(imported_object.evaluated_name)
def visit_SimpleStatementLine(self, node):
"""
Global assignments like `GEMMA_INPUT_DOCSTRING = 'THIS IS THE INPUT'` and all import statements
are extracted and saved in their corresponding dict. They are then used when updating dependency mappings.
"""
parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
simple_top_level_assign_structure = m.SimpleStatementLine(body=[m.Assign(targets=[m.AssignTarget(target=m.Name())])])
simple_top_level_variable_indexing = m.SimpleStatementLine(body=[m.Assign(targets=[m.AssignTarget(target=m.Subscript(value=m.Name()) | m.Attribute(value=m.Name()))])])
if m.matches(parent_node, m.Module()):
if m.matches(node, simple_top_level_assign_structure):
left_hand_side = node.body[0].targets[0].target.value
self.current_assignment = left_hand_side
self.assignments[left_hand_side] = node
elif m.matches(node, simple_top_level_variable_indexing):
indexed_variable = node.body[0].targets[0].target.value.value
self.current_assignment = indexed_variable
node_name = self.python_module.code_for_node(node)
self.assignments[node_name] = node
self.object_dependency_mapping[indexed_variable].add(node_name)
elif m.matches(node, m.SimpleStatementLine(body=[m.Import() | m.ImportFrom()])):
self.imports.append(node)
def leave_SimpleStatementLine(self, node):
self.current_assignment = None
def visit_FunctionDef(self, node):
parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
if m.matches(parent_node, m.Module()):
self.current_function = node.name.value
self.functions[node.name.value] = node
def leave_FunctionDef(self, node):
parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
if m.matches(parent_node, m.Module()):
self.current_function = None
def visit_If(self, node):
if self.current_function is None and self.current_class is None:
for stmt in node.body.body:
if m.matches(stmt, m.SimpleStatementLine(body=[m.ImportFrom() | m.Import()])):
self.imports.append(node)
def visit_ClassDef(self, node: ClassDef) -> None:
"""Record class nodes to create their dependencies at the end."""
self.classes[node.name.value] = node
self.current_class = node.name.value
def leave_ClassDef(self, node):
self.current_class = None
def visit_Name(self, node: cst.Name):
"""This is used to create a mapping from module-scope functions and assignments to objects used inside them."""
if self.current_function is not None:
self.object_dependency_mapping[self.current_function].add(node.value)
if self.current_assignment is not None:
self.object_dependency_mapping[self.current_assignment].add(node.value)
def leave_Module(self, node):
"""When leaving the module, we store the position of each global scoped node to allow sorting the dependencies
based on their position in the code later. We use the PositionProvider metadata wrapper for this.
We also make sure to update `self.object_dependency_mapping` so that it contains only names recorded in
`self.global_nodes`.
"""
self.global_nodes = {**self.assignments, **self.classes, **self.functions}
self.start_lines = {}
for id, node in self.global_nodes.items():
self.start_lines[id] = self.get_metadata(cst.metadata.PositionProvider, node).start.line
def _restrict_dependencies_to_known_entities(self):
"""Since we added every Name as part of `self.object_dependency_mapping`, we need to remove those that
are not part of the recorded objects in `self.global_nodes` (i.e. built-in variables, imports, etc).
This should be called only after all merging operations have been finalized!!"""
global_objects = set(self.global_nodes.keys())
for object_name, dependencies in self.object_dependency_mapping.items():
self.object_dependency_mapping[object_name] = {dep for dep in dependencies if dep in global_objects}
def _compute_recursive_object_dependencies(self) -> dict[str, set]:
"""Based on immediate dependency mapping, create the recursive dependency mapping. For example, given the
following file:
```
def foo():
pass
def bar():
foo()
def test():
bar()
```
this visitor can only record immediate dependencies, i.e. it will record the following
`self.object_dependency_mapping = {"test": {"bar"}, "bar": {"foo"}}`. This function is used to create
the recursive mapping, i.e. `recursive_dependencies = {"test": {"bar", "foo"}, "bar": {"foo"}}`.
"""
recursive_dependencies = {}
for object_name in self.object_dependency_mapping:
all_dependencies = find_all_dependencies(self.object_dependency_mapping, start_entity=object_name)
recursive_dependencies[object_name] = all_dependencies
return recursive_dependencies
def augment_dependencies(self, dependencies: set[str]) -> set[str]:
"""For a set of `dependencies`, augment them by adding all potential dependencies of the **functions** and
**assignments** present in the `dependencies`.
"""
new_dependencies = dependencies.copy()
for dep in tuple(dependencies):
if dep in self.object_recursive_dependency_mapping:
new_dependencies.update(self.object_recursive_dependency_mapping[dep])
return new_dependencies
def compute_class_dependencies(self):
"""For each visited class, find its dependencies based on visiting the current file + potential merged dependencies."""
self.class_dependency_mapping = {}
for class_name, class_node in self.classes.items():
dependencies = dependencies_for_class_node(class_node, set(self.global_nodes.keys()))
self.class_dependency_mapping[class_name] = self.augment_dependencies(dependencies)
@abstractmethod
def compute_relative_order(self, missing_dependencies: set) -> dict[str, int]:
raise NotImplementedError
|
class ModuleMapper(CSTVisitor, ABC):
'''An abstract visitor class which analyses a module, creating a mapping of dependencies for classes, functions and assignments.
Class dependencies are computed with `compute_class_dependencies()`, while function and assignment dependencies are stored in
`self.object_recursive_dependency_mapping` (can be computed by `_compute_recursive_object_dependencies()`).
It defines common visiting patterns (i.e. common visit_xxx/leave_xxx functions) between the modular file and the
modeling files that will be visited.
'''
def __init__(self, python_module: cst.Module):
pass
def visit_ImportFrom(self, node):
'''This keeps track of objects imported from neighbor modeling files (e.g. in `modeling_xxx.py`, we have
`from .configuration_xxx import Config`, then `Config` should be recorded as it is not a dependency that needs
to be added (because it will be part of the imports)'''
pass
def visit_SimpleStatementLine(self, node):
'''
Global assignments like `GEMMA_INPUT_DOCSTRING = 'THIS IS THE INPUT'` and all import statements
are extracted and saved in their corresponding dict. They are then used when updating dependency mappings.
'''
pass
def leave_SimpleStatementLine(self, node):
pass
def visit_FunctionDef(self, node):
pass
def leave_FunctionDef(self, node):
pass
def visit_If(self, node):
pass
def visit_ClassDef(self, node: ClassDef) -> None:
'''Record class nodes to create their dependencies at the end.'''
pass
def leave_ClassDef(self, node):
pass
def visit_Name(self, node: cst.Name):
'''This is used to create a mapping from module-scope functions and assignments to objects used inside them.'''
pass
def leave_Module(self, node):
'''When leaving the module, we store the position of each global scoped node to allow sorting the dependencies
based on their position in the code later. We use the PositionProvider metadata wrapper for this.
We also make sure to update `self.object_dependency_mapping` so that it contains only names recorded in
`self.global_nodes`.
'''
pass
def _restrict_dependencies_to_known_entities(self):
'''Since we added every Name as part of `self.object_dependency_mapping`, we need to remove those that
are not part of the recorded objects in `self.global_nodes` (i.e. built-in variables, imports, etc).
This should be called only after all merging operations have been finalized!!'''
pass
def _compute_recursive_object_dependencies(self) -> dict[str, set]:
'''Based on immediate dependency mapping, create the recursive dependency mapping. For example, given the
following file:
```
def foo():
pass
def bar():
foo()
def test():
bar()
```
this visitor can only record immediate dependencies, i.e. it will record the following
`self.object_dependency_mapping = {"test": {"bar"}, "bar": {"foo"}}`. This function is used to create
the recursive mapping, i.e. `recursive_dependencies = {"test": {"bar", "foo"}, "bar": {"foo"}}`.
'''
pass
def augment_dependencies(self, dependencies: set[str]) -> set[str]:
'''For a set of `dependencies`, augment them by adding all potential dependencies of the **functions** and
**assignments** present in the `dependencies`.
'''
pass
def compute_class_dependencies(self):
'''For each visited class, find its dependencies based on visiting the current file + potential merged dependencies.'''
pass
@abstractmethod
def compute_relative_order(self, missing_dependencies: set) -> dict[str, int]:
pass
| 18 | 10 | 9 | 0 | 5 | 4 | 2 | 0.71 | 2 | 6 | 0 | 2 | 15 | 13 | 15 | 35 | 155 | 18 | 85 | 50 | 68 | 60 | 80 | 49 | 64 | 4 | 4 | 3 | 33
|
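`_compute_recursive_object_dependencies` above delegates to a `find_all_dependencies` helper defined elsewhere in this file; a breadth-first stand-in shows the intended transitive closure on the docstring's own example:

from collections import deque

def transitive_deps(mapping: dict[str, set], start: str) -> set:
    seen, queue = set(), deque(mapping.get(start, ()))
    while queue:
        dep = queue.popleft()
        if dep not in seen:
            seen.add(dep)
            queue.extend(mapping.get(dep, ()))
    return seen

immediate = {"test": {"bar"}, "bar": {"foo"}}
assert transitive_deps(immediate, "test") == {"bar", "foo"}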
71 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/utils/modular_model_converter.py | modular_model_converter.ReplaceNameTransformer |
import libcst as cst
import re
from libcst import matchers as m
class ReplaceNameTransformer(m.MatcherDecoratableTransformer):
"""A transformer that replaces `old_name` with `new_name` in comments, string and any references.
It should take into account name like `MyNewModel`, or `my_new_model`. Without using the AUTO_MAPPING.
Supported renaming patterns:
- llama -> my_new_model and my_new_model -> llama
- Llama -> MyNewModel and MyNewModel -> Llama
- LLAMA -> MY_NEW_MODEL and MY_NEW_MODEL -> LLAMA
- LLaMa -> MyNewModel and MyNewModel -> Llama
"""
def __init__(self, old_name: str, new_name: str, original_new_model_name: str='', only_doc: bool=False):
super().__init__()
old_name = old_name.replace('-', '_')
new_name = new_name.replace('-', '_')
self.old_name = old_name
self.new_name = new_name
self.cased_new_name = get_cased_name(self.new_name)
self.cased_old_name = get_cased_name(self.old_name)
self.patterns = {old_name: new_name, old_name.upper(): new_name.upper(), self.cased_old_name: self.cased_new_name}
self.original_new_model_name = original_new_model_name
self.only_doc = only_doc
def _replace_name(self, original_node, updated_node):
if re.findall('# Copied from', updated_node.value):
return cst.RemoveFromParent()
update = preserve_case_replace(updated_node.value, self.patterns, self.cased_new_name)
return updated_node.with_changes(value=update)
@m.leave(m.SimpleString() | m.Comment())
def replace_name(self, original_node, updated_node):
return self._replace_name(original_node, updated_node)
def leave_Name(self, original_node, updated_node):
if not self.only_doc:
return self._replace_name(original_node, updated_node)
return updated_node
def leave_ImportFrom(self, original_node, updated_node):
"""
The imports from other file types (configuration, processing etc.) should use the original model name.
Also, no replacements are made on absolute imports (e.g. `from mamba_ssm import ...`).
"""
if len(original_node.relative) == 0:
return original_node
if self.original_new_model_name != self.new_name and m.matches(updated_node.module, m.Name()):
patterns = '|'.join(ALL_FILE_TYPES)
regex = f'({patterns})_{self.new_name}'
new_source = re.sub(regex, lambda m: f'{m.group(1)}_{self.original_new_model_name}', updated_node.module.value)
updated_node = updated_node.with_changes(module=updated_node.module.with_changes(value=new_source))
return updated_node
|
class ReplaceNameTransformer(m.MatcherDecoratableTransformer):
'''A transformer that replaces `old_name` with `new_name` in comments, strings and any references.
It should take into account names like `MyNewModel` or `my_new_model`, without using the AUTO_MAPPING.
Supported renaming patterns:
- llama -> my_new_model and my_new_model -> llama
- Llama -> MyNewModel and MyNewModel -> Llama
- LLAMA -> MY_NEW_MODEL and MY_NEW_MODEL -> LLAMA
- LLaMa -> MyNewModel and MyNewModel -> Llama
'''
def __init__(self, old_name: str, new_name: str, original_new_model_name: str='', only_doc: bool=False):
pass
def _replace_name(self, original_node, updated_node):
pass
@m.leave(m.SimpleString() | m.Comment())
def replace_name(self, original_node, updated_node):
pass
def leave_Name(self, original_node, updated_node):
pass
def leave_ImportFrom(self, original_node, updated_node):
'''
The imports from other file types (configuration, processing etc.) should use the original model name.
Also, no replacements are made on absolute imports (e.g. `from mamba_ssm import ...`).
'''
pass
| 7 | 2 | 7 | 0 | 7 | 1 | 2 | 0.34 | 1 | 3 | 0 | 0 | 5 | 7 | 5 | 5 | 51 | 5 | 35 | 18 | 28 | 12 | 28 | 17 | 22 | 2 | 1 | 1 | 8
|
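The three entries built into `self.patterns` above cover the lower, upper and cased spellings of a model name. A sketch of the intended effect, with a naive str.replace standing in for the real preserve_case_replace helper:

patterns = {
    "llama": "my_new_model",  # lower snake_case
    "LLAMA": "MY_NEW_MODEL",  # upper snake_case
    "Llama": "MyNewModel",    # cased form, via get_cased_name
}
text = "LLAMA_INPUTS_DOCSTRING for LlamaModel"
for src, dst in patterns.items():
    text = text.replace(src, dst)
print(text)  # MY_NEW_MODEL_INPUTS_DOCSTRING for MyNewModelModel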
72 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_multimodal2.py | modular_multimodal2.Multimodal2VisionAttention |
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPEncoder, CLIPEncoderLayer, CLIPPreTrainedModel, CLIPVisionModel, CLIPVisionTransformer
class Multimodal2VisionAttention(CLIPAttention):
pass
|
class Multimodal2VisionAttention(CLIPAttention):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0
|
73 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_multimodal2.py | modular_multimodal2.Multimodal2VisionEncoder |
from torch import nn
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPEncoder, CLIPEncoderLayer, CLIPPreTrainedModel, CLIPVisionModel, CLIPVisionTransformer
class Multimodal2VisionEncoder(CLIPEncoder):
def __init__(self, config):
super().__init__(config)
self.layers = nn.ModuleList([Multimodal2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
|
class Multimodal2VisionEncoder(CLIPEncoder):
def __init__(self, config):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 1 | 1 | 1 | 13 | 4 | 0 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 2 | 0 | 1
|
74 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_multimodal2.py | modular_multimodal2.Multimodal2VisionEncoderLayer |
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPEncoder, CLIPEncoderLayer, CLIPPreTrainedModel, CLIPVisionModel, CLIPVisionTransformer
class Multimodal2VisionEncoderLayer(CLIPEncoderLayer):
def __init__(self, config):
super().__init__()
self.mlp = Multimodal2VisionMLP(config)
|
class Multimodal2VisionEncoderLayer(CLIPEncoderLayer):
def __init__(self, config):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 1 | 2 | 1 | 13 | 5 | 0 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 1 | 2 | 0 | 1
|
75 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_multimodal2.py | modular_multimodal2.Multimodal2VisionMLP |
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPEncoder, CLIPEncoderLayer, CLIPPreTrainedModel, CLIPVisionModel, CLIPVisionTransformer
class Multimodal2VisionMLP(CLIPMLP):
pass
|
class Multimodal2VisionMLP(CLIPMLP):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0
|
76 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_multimodal2.py | modular_multimodal2.Multimodal2VisionModel |
from transformers.utils import add_start_docstrings
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPEncoder, CLIPEncoderLayer, CLIPPreTrainedModel, CLIPVisionModel, CLIPVisionTransformer
@add_start_docstrings('New doc', MULTIMODAL2_VISION_START_DOCSTRING)
class Multimodal2VisionModel(CLIPVisionModel, Multimodal2VisionPreTrainedModel):
_no_split_modules = ['Multimodal2VisionEncoderLayer']
|
@add_start_docstrings('New doc', MULTIMODAL2_VISION_START_DOCSTRING)
class Multimodal2VisionModel(CLIPVisionModel, Multimodal2VisionPreTrainedModel):
pass
| 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 3 | 0 | 0
|
77 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_multimodal2.py | modular_multimodal2.Multimodal2VisionPreTrainedModel |
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPEncoder, CLIPEncoderLayer, CLIPPreTrainedModel, CLIPVisionModel, CLIPVisionTransformer
class Multimodal2VisionPreTrainedModel(CLIPPreTrainedModel):
def _init_weights(self, module):
if isinstance(module, Multimodal2VisionMLP):
pass
|
class Multimodal2VisionPreTrainedModel(CLIPPreTrainedModel):
def _init_weights(self, module):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 2 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 2 | 4 | 0 | 4 | 2 | 2 | 0 | 4 | 2 | 2 | 2 | 2 | 1 | 2
|
78 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_multimodal2.py | modular_multimodal2.Multimodal2VisionTransformer |
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPEncoder, CLIPEncoderLayer, CLIPPreTrainedModel, CLIPVisionModel, CLIPVisionTransformer
class Multimodal2VisionTransformer(CLIPVisionTransformer):
def __init__(self, config):
super().__init__(config)
self.encoder = Multimodal2VisionEncoder(config)
|
class Multimodal2VisionTransformer(CLIPVisionTransformer):
def __init__(self, config):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 1 | 1 | 1 | 13 | 4 | 0 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 2 | 0 | 1
|
79 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_my_new_model.py | modular_my_new_model.MyNewModelConfig |
from transformers.models.llama.configuration_llama import LlamaConfig
class MyNewModelConfig(LlamaConfig):
"""
This is the configuration class to store the configuration of a [`MyNewModelModel`]. It is used to instantiate a MyNewModel
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the MyNewModel-7B.
e.g. [meta-my_new_model/MyNewModel-2-7b-hf](https://huggingface.co/meta-my_new_model/MyNewModel-2-7b-hf)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the MyNewModel model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MyNewModelModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. MyNewModel 1 supports up to 2048 tokens,
MyNewModel 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'my_new_model3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'my_new_model3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'my_new_model3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'my_new_model3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import MyNewModelModel, MyNewModelConfig
>>> # Initializing a MyNewModel my_new_model-7b style configuration
>>> configuration = MyNewModelConfig()
>>> # Initializing a model from the my_new_model-7b style configuration
>>> model = MyNewModelModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
def __init__(self, mlp_bias=True, new_param=0, **super_kwargs):
        super().__init__(**super_kwargs)
self.mlp_bias = mlp_bias
self.new_param = new_param
|
class MyNewModelConfig(LlamaConfig):
'''
    This is the configuration class to store the configuration of a [`MyNewModelModel`]. It is used to instantiate a MyNewModel
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the MyNewModel-7B.
e.g. [meta-my_new_model/MyNewModel-2-7b-hf](https://huggingface.co/meta-my_new_model/MyNewModel-2-7b-hf)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the MyNewModel model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MyNewModelModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. MyNewModel 1 supports up to 2048 tokens,
MyNewModel 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'my_new_model3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'my_new_model3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'my_new_model3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'my_new_model3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import MyNewModelModel, MyNewModelConfig
>>> # Initializing a MyNewModel my_new_model-7b style configuration
>>> configuration = MyNewModelConfig()
>>> # Initializing a model from the my_new_model-7b style configuration
>>> model = MyNewModelModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, mlp_bias=True, new_param=0, **super_kwargs):
pass
| 2
| 1
| 4
| 0
| 4
| 0
| 1
| 0.8
| 1
| 1
| 0
| 0
| 1
| 2
| 1
| 2
| 10
| 1
| 5
| 4
| 3
| 4
| 5
| 4
| 3
| 1
| 2
| 0
| 1
|
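The `rope_scaling` contract documented in record 79 is inherited from `LlamaConfig`; a small sketch exercising it on the parent class (values are illustrative, and the accepted keys depend on the installed transformers version):

```python
from transformers import LlamaConfig

# Illustrative values only: per the docstring in record 79, a `factor` > 1 lets
# the model handle sequences up to factor * original pre-trained max length.
config = LlamaConfig(
    max_position_embeddings=2048,
    rope_theta=10000.0,
    rope_scaling={"rope_type": "linear", "factor": 2.0},
)
print(config.rope_scaling)  # {'rope_type': 'linear', 'factor': 2.0}
```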
80
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_my_new_model2.py
|
modular_my_new_model2.MyNewModel2Config
|
from transformers.models.llama.configuration_llama import LlamaConfig
class MyNewModel2Config(LlamaConfig):
"""
    This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate a Gemma
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Gemma-7B.
e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GemmaModel`]
```python
>>> from transformers import GemmaModel, GemmaConfig
>>> # Initializing a Gemma gemma-7b style configuration
>>> configuration = GemmaConfig()
>>> # Initializing a model from the gemma-7b style configuration
>>> model = GemmaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
|
class MyNewModel2Config(LlamaConfig):
'''
    This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate a Gemma
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Gemma-7B.
e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GemmaModel`]
```python
>>> from transformers import GemmaModel, GemmaConfig
>>> # Initializing a Gemma gemma-7b style configuration
>>> configuration = GemmaConfig()
>>> # Initializing a model from the gemma-7b style configuration
>>> model = GemmaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 20
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 21
| 0
| 1
| 1
| 0
| 20
| 1
| 1
| 0
| 0
| 2
| 0
| 0
|
81
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_my_new_model2.py
|
modular_my_new_model2.MyNewModel2ForSequenceClassification
|
from transformers.models.gemma.modeling_gemma import GemmaForSequenceClassification
class MyNewModel2ForSequenceClassification(GemmaForSequenceClassification):
pass
|
class MyNewModel2ForSequenceClassification(GemmaForSequenceClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
82
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_new_imgproc_model.py
|
modular_new_imgproc_model.ImgprocModelImageProcessor
|
import torch
from transformers.models.blip.image_processing_blip import BlipImageProcessor
import torch.utils.checkpoint
class ImgprocModelImageProcessor(BlipImageProcessor):
def new_image_processing_method(self, pixel_values: torch.FloatTensor):
return pixel_values / 2
|
class ImgprocModelImageProcessor(BlipImageProcessor):
def new_image_processing_method(self, pixel_values: torch.FloatTensor):
pass
| 2
| 0
| 2
| 0
| 2
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 24
| 3
| 0
| 3
| 2
| 1
| 0
| 3
| 2
| 1
| 1
| 4
| 0
| 1
|
83
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_new_model.py
|
modular_new_model.NewModelConfig
|
from transformers.models.gemma.configuration_gemma import GemmaConfig
class NewModelConfig(GemmaConfig):
def __init__(self, vocab_size=256030, hidden_size=64, intermediate_size=90, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act='gelu_pytorch_tanh', hidden_activation=None, max_position_embeddings=1500, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, **kwargs):
        super().__init__(**kwargs)
@property
def num_heads(self):
return self.num_attention_heads
|
class NewModelConfig(GemmaConfig):
def __init__(self, vocab_size=256030, hidden_size=64, intermediate_size=90, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act='gelu_pytorch_tanh', hidden_activation=None, max_position_embeddings=1500, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, **kwargs):
pass
@property
def num_heads(self):
pass
| 4
| 0
| 14
| 0
| 14
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 0
| 2
| 3
| 30
| 1
| 29
| 27
| 2
| 0
| 5
| 3
| 2
| 1
| 2
| 0
| 2
|
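Record 83's `num_heads` property is a read-only alias for `num_attention_heads`; the same pattern in isolation:

```python
# Stand-alone illustration of the read-only alias pattern from record 83.
class AliasedConfig:
    def __init__(self, num_attention_heads=16):
        self.num_attention_heads = num_attention_heads

    @property
    def num_heads(self):
        # Alias kept for callers that expect the alternate attribute name.
        return self.num_attention_heads


assert AliasedConfig().num_heads == 16
```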
84
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_new_task_model.py
|
modular_new_task_model.NewTaskModelForNewTask
|
import torch.utils.checkpoint
from typing import ClassVar, Optional, Union
import torch
from transformers.models.paligemma.modeling_paligemma import PaliGemmaForConditionalGeneration
from ...cache_utils import Cache
from torch import nn
class NewTaskModelForNewTask(PaliGemmaForConditionalGeneration):
main_input_name: ClassVar[str] = 'doc_input_ids'
def __init__(self, config):
super().__init__(config=config)
self.embedding_dim = self.config.embedding_dim
self.custom_text_proj = nn.Linear(self.config.text_config.hidden_size, self.embedding_dim)
if self.language_model._tied_weights_keys is not None:
self._tied_weights_keys = [f'model.language_model.{k}' for k in self.language_model._tied_weights_keys]
self.post_init()
def forward(self, input_ids: torch.LongTensor=None, pixel_values: torch.FloatTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]]=None, token_type_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, num_logits_to_keep: int=0):
"""
Returns:
"""
vlm_outputs = super().forward(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, token_type_ids=token_type_ids, cache_position=cache_position, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=True, return_dict=True, num_logits_to_keep=num_logits_to_keep)
last_hidden_states = vlm_outputs.hidden_states[-1]
proj = self.custom_text_proj(last_hidden_states)
embeddings = proj / proj.norm(dim=-1, keepdim=True)
if attention_mask is not None:
embeddings = embeddings * attention_mask.unsqueeze(-1)
return (embeddings,) + vlm_outputs
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of=None, mean_resizing=True) -> nn.Embedding:
model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self.config.text_config.vocab_size = model_embeds.num_embeddings
self.config.vocab_size = model_embeds.num_embeddings
self.vocab_size = model_embeds.num_embeddings
return model_embeds
|
class NewTaskModelForNewTask(PaliGemmaForConditionalGeneration):
def __init__(self, config):
pass
def forward(self, input_ids: torch.LongTensor=None, pixel_values: torch.FloatTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]]=None, token_type_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, num_logits_to_keep: int=0):
'''
Returns:
'''
pass
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of=None, mean_resizing=True) -> nn.Embedding:
pass
| 4
| 1
| 22
| 3
| 18
| 3
| 1
| 0.18
| 1
| 4
| 0
| 0
| 3
| 4
| 3
| 16
| 71
| 11
| 55
| 32
| 33
| 10
| 22
| 14
| 18
| 2
| 3
| 1
| 4
|
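The forward pass in record 84 projects the last hidden states, L2-normalizes them, and zeroes padded positions; the same three steps as a self-contained torch snippet (shapes are illustrative):

```python
import torch
import torch.nn as nn

batch, seq, hidden, dim = 2, 5, 8, 4
last_hidden_states = torch.randn(batch, seq, hidden)
custom_text_proj = nn.Linear(hidden, dim)

proj = custom_text_proj(last_hidden_states)
embeddings = proj / proj.norm(dim=-1, keepdim=True)  # unit-norm feature vectors

attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
embeddings = embeddings * attention_mask.unsqueeze(-1)  # zero out padding
print(embeddings.shape)  # torch.Size([2, 5, 4])
```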
85
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_roberta.py
|
modular_roberta.RobertaEmbeddings
|
import torch.nn as nn
from transformers.models.bert.modeling_bert import BertEmbeddings, BertModel
class RobertaEmbeddings(BertEmbeddings):
def __init__(self, config):
super().__init__(config)
self.pad_token_id = config.pad_token_id
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
|
class RobertaEmbeddings(BertEmbeddings):
def __init__(self, config):
pass
| 2
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 2
| 1
| 13
| 7
| 0
| 7
| 4
| 5
| 0
| 5
| 4
| 3
| 1
| 2
| 0
| 1
|
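The `nn.Embedding` call in record 85 passes `config.pad_token_id` as the third positional argument, `padding_idx`; a quick check of what that buys:

```python
import torch.nn as nn

# padding_idx (the third positional argument) keeps that embedding row at zero
# and excludes it from gradient updates, which is what the RobertaEmbeddings
# override in record 85 relies on for pad positions.
position_embeddings = nn.Embedding(512, 768, 1)  # (num_embeddings, dim, padding_idx)
assert position_embeddings.weight[1].abs().sum().item() == 0.0
```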
86
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_roberta.py
|
modular_roberta.RobertaModel
|
from transformers.models.bert.modeling_bert import BertEmbeddings, BertModel
class RobertaModel(BertModel):
def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
|
class RobertaModel(BertModel):
def __init__(self, config, add_pooling_layer=True):
pass
| 2
| 0
| 2
| 0
| 2
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 7
| 3
| 0
| 3
| 2
| 1
| 0
| 3
| 2
| 1
| 1
| 3
| 0
| 1
|
87
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_super.py
|
modular_super.SuperModel
|
from typing import Optional, Union
from ...cache_utils import Cache
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.models.llama.modeling_llama import LlamaModel
import torch
class SuperModel(LlamaModel):
def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithPast]:
out = super().forward(input_ids, attention_mask, position_ids, past_key_values, inputs_embeds, use_cache, output_attentions, output_hidden_states, return_dict, cache_position)
out.logits *= 2 ** 4
return out
|
class SuperModel(LlamaModel):
def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithPast]:
pass
| 2
| 0
| 27
| 0
| 27
| 0
| 1
| 0
| 1
| 4
| 0
| 0
| 1
| 0
| 1
| 8
| 28
| 0
| 28
| 15
| 14
| 0
| 5
| 3
| 3
| 1
| 3
| 0
| 1
|
88
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/modular-transformers/modular_switch_function.py
|
modular_switch_function.SwitchFunctionAttention
|
from transformers.models.llama.modeling_llama import LlamaAttention
class SwitchFunctionAttention(LlamaAttention):
pass
|
class SwitchFunctionAttention(LlamaAttention):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
89
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/utils/notification_service.py
|
notification_service.Message
|
import re
import os
from typing import Any, Optional, Union
import json
import time
class Message:
def __init__(self, title: str, ci_title: str, model_results: dict, additional_results: dict, selected_warnings: Optional[list]=None, prev_ci_artifacts=None, other_ci_artifacts=None):
self.title = title
self.ci_title = ci_title
self.n_model_success = sum((r['success'] for r in model_results.values()))
self.n_model_single_gpu_failures = sum((dicts_to_sum(r['failed'])['single'] for r in model_results.values()))
self.n_model_multi_gpu_failures = sum((dicts_to_sum(r['failed'])['multi'] for r in model_results.values()))
self.n_model_unknown_failures = sum((dicts_to_sum(r['failed'])['unclassified'] for r in model_results.values()))
self.n_model_failures = self.n_model_single_gpu_failures + self.n_model_multi_gpu_failures + self.n_model_unknown_failures
self.n_model_jobs_errored_out = sum((r['error'] for r in model_results.values()))
self.n_additional_success = sum((r['success'] for r in additional_results.values()))
self.n_additional_jobs_errored_out = sum((r['error'] for r in additional_results.values()))
if len(additional_results) > 0:
all_additional_failures = dicts_to_sum([r['failed'] for r in additional_results.values()])
self.n_additional_single_gpu_failures = all_additional_failures['single']
self.n_additional_multi_gpu_failures = all_additional_failures['multi']
self.n_additional_unknown_gpu_failures = all_additional_failures['unclassified']
else:
self.n_additional_single_gpu_failures = 0
self.n_additional_multi_gpu_failures = 0
self.n_additional_unknown_gpu_failures = 0
self.n_additional_failures = self.n_additional_single_gpu_failures + self.n_additional_multi_gpu_failures + self.n_additional_unknown_gpu_failures
self.n_failures = self.n_model_failures + self.n_additional_failures
self.n_success = self.n_model_success + self.n_additional_success
self.n_tests = self.n_failures + self.n_success
self.n_jobs_errored_out = self.n_model_jobs_errored_out + self.n_additional_jobs_errored_out
self.model_results = model_results
self.additional_results = additional_results
self.thread_ts = None
if selected_warnings is None:
selected_warnings = []
self.selected_warnings = selected_warnings
self.prev_ci_artifacts = prev_ci_artifacts
self.other_ci_artifacts = other_ci_artifacts
@property
def time(self) -> str:
all_results = [*self.model_results.values(), *self.additional_results.values()]
time_spent = []
for r in all_results:
if len(r['time_spent']):
time_spent.extend(r['time_spent'])
total_secs = sum(time_spent)
hours, minutes, seconds = (total_secs // 3600, total_secs % 3600 // 60, total_secs % 60)
return f'{int(hours)}h{int(minutes)}m{int(seconds)}s'
@property
def header(self) -> dict:
return {'type': 'header', 'text': {'type': 'plain_text', 'text': self.title}}
@property
def ci_title_section(self) -> dict:
return {'type': 'section', 'text': {'type': 'mrkdwn', 'text': self.ci_title}}
@property
def no_failures(self) -> dict:
return {'type': 'section', 'text': {'type': 'plain_text', 'text': f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.', 'emoji': True}, 'accessory': {'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}"}}
@property
def failures(self) -> dict:
return {'type': 'section', 'text': {'type': 'plain_text', 'text': f'There were {self.n_failures} failures, out of {self.n_tests} tests.\n🚨 There were {self.n_jobs_errored_out} jobs errored out (not producing test output files).\nThe suite ran in {self.time}.', 'emoji': True}, 'accessory': {'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}"}}
@property
def warnings(self) -> dict:
button_text = 'Check warnings (Link not found)'
job_link = f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}"
for job in github_actions_jobs:
if 'Extract warnings in CI artifacts' in job['name'] and job['conclusion'] == 'success':
button_text = 'Check warnings'
job_link = job['html_url']
break
huggingface_hub_warnings = [x for x in self.selected_warnings if 'huggingface_hub' in x]
text = f'There are {len(self.selected_warnings)} warnings being selected.'
text += f'\n{len(huggingface_hub_warnings)} of them are from `huggingface_hub`.'
return {'type': 'section', 'text': {'type': 'plain_text', 'text': text, 'emoji': True}, 'accessory': {'type': 'button', 'text': {'type': 'plain_text', 'text': button_text, 'emoji': True}, 'url': job_link}}
@staticmethod
def get_device_report(report, rjust=6):
if 'single' in report and 'multi' in report:
return f"{str(report['single']).rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
elif 'single' in report:
return f"{str(report['single']).rjust(rjust)} | {'0'.rjust(rjust)} | "
elif 'multi' in report:
return f"{'0'.rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
@property
def category_failures(self) -> dict:
if job_name != 'run_models_gpu':
category_failures_report = ''
return {'type': 'section', 'text': {'type': 'mrkdwn', 'text': category_failures_report}}
model_failures = [v['failed'] for v in self.model_results.values()]
category_failures = {}
for model_failure in model_failures:
for key, value in model_failure.items():
if key not in category_failures:
category_failures[key] = dict(value)
else:
category_failures[key]['unclassified'] += value['unclassified']
category_failures[key]['single'] += value['single']
category_failures[key]['multi'] += value['multi']
individual_reports = []
for key, value in category_failures.items():
device_report = self.get_device_report(value)
if sum(value.values()):
if device_report:
individual_reports.append(f'{device_report}{key}')
else:
individual_reports.append(key)
header = 'Single | Multi | Category\n'
category_failures_report = prepare_reports(title='The following categories had failures', header=header, reports=individual_reports)
return {'type': 'section', 'text': {'type': 'mrkdwn', 'text': category_failures_report}}
def compute_diff_for_failure_reports(self, curr_failure_report, prev_failure_report):
model_failures = curr_failure_report.split('\n')[3:-2]
prev_model_failures = prev_failure_report.split('\n')[3:-2]
entries_changed = set(model_failures).difference(prev_model_failures)
prev_map = {}
for f in prev_model_failures:
items = [x.strip() for x in f.split('| ')]
prev_map[items[-1]] = [int(x) for x in items[:-1]]
curr_map = {}
for f in entries_changed:
items = [x.strip() for x in f.split('| ')]
curr_map[items[-1]] = [int(x) for x in items[:-1]]
diff_map = {}
for k, v in curr_map.items():
if k not in prev_map:
diff_map[k] = v
else:
diff = [x - y for x, y in zip(v, prev_map[k])]
if max(diff) > 0:
diff_map[k] = diff
entries_changed = []
for model_name, diff_values in diff_map.items():
diff = [str(x) for x in diff_values]
diff = [f'+{x}' if x != '0' and (not x.startswith('-')) else x for x in diff]
diff = [x.rjust(9) for x in diff]
device_report = ' | '.join(diff) + ' | '
report = f'{device_report}{model_name}'
entries_changed.append(report)
entries_changed = sorted(entries_changed, key=lambda s: s.split('| ')[-1])
return entries_changed
@property
def model_failures(self) -> list[dict]:
def per_model_sum(model_category_dict):
return dicts_to_sum(model_category_dict['failed'].values())
failures = {}
non_model_failures = {k: per_model_sum(v) for k, v in self.model_results.items() if sum(per_model_sum(v).values())}
for k, v in self.model_results.items():
k = k.replace('models_', '').replace('quantization_', '')
if k in NON_MODEL_TEST_MODULES:
continue
if sum(per_model_sum(v).values()):
dict_failed = dict(v['failed'])
if job_name == 'run_models_gpu':
pytorch_specific_failures = dict_failed.pop('PyTorch')
other_failures = dicts_to_sum(dict_failed.values())
failures[k] = {'PyTorch': pytorch_specific_failures, 'other': other_failures}
else:
test_name = job_to_test_map[job_name]
specific_failures = dict_failed.pop(test_name)
failures[k] = {test_name: specific_failures}
model_reports = []
other_module_reports = []
for key, value in non_model_failures.items():
key = key.replace('models_', '').replace('quantization_', '')
if key in NON_MODEL_TEST_MODULES:
device_report = self.get_device_report(value)
if sum(value.values()):
if device_report:
report = f'{device_report}{key}'
else:
report = key
other_module_reports.append(report)
for key, value in failures.items():
if job_name == 'run_models_gpu':
device_report_values = [value['PyTorch']['single'], value['PyTorch']['multi'], sum(value['other'].values())]
else:
test_name = job_to_test_map[job_name]
device_report_values = [value[test_name]['single'], value[test_name]['multi']]
if sum(device_report_values):
rjust_width = 9 if job_name == 'run_models_gpu' else 6
device_report = ' | '.join([str(x).rjust(rjust_width) for x in device_report_values]) + ' | '
report = f'{device_report}{key}'
model_reports.append(report)
if job_name == 'run_models_gpu':
model_header = 'Single PT | Multi PT | Other | Category\n'
else:
model_header = 'Single | Multi | Category\n'
label = test_to_result_name[job_to_test_map[job_name]]
sorted_model_reports = sorted(model_reports, key=lambda s: s.split('| ')[-1])
        model_failures_report = prepare_reports(title=f'The following {label} modules had failures', header=model_header, reports=sorted_model_reports)
module_header = 'Single | Multi | Category\n'
sorted_module_reports = sorted(other_module_reports, key=lambda s: s.split('| ')[-1])
module_failures_report = prepare_reports(title=f'The following {label} modules had failures', header=module_header, reports=sorted_module_reports)
model_failure_sections = [{'type': 'section', 'text': {'type': 'mrkdwn', 'text': model_failures_report}}]
model_failure_sections.append({'type': 'section', 'text': {'type': 'mrkdwn', 'text': module_failures_report}})
        model_failures_report = prepare_reports(title=f'The following {label} modules had failures', header=model_header, reports=sorted_model_reports, to_truncate=False)
file_path = os.path.join(os.getcwd(), f'ci_results_{job_name}/model_failures_report.txt')
with open(file_path, 'w', encoding='UTF-8') as fp:
fp.write(model_failures_report)
module_failures_report = prepare_reports(title=f'The following {label} modules had failures', header=module_header, reports=sorted_module_reports, to_truncate=False)
file_path = os.path.join(os.getcwd(), f'ci_results_{job_name}/module_failures_report.txt')
with open(file_path, 'w', encoding='UTF-8') as fp:
fp.write(module_failures_report)
if self.prev_ci_artifacts is not None:
if f'ci_results_{job_name}' in self.prev_ci_artifacts and 'model_failures_report.txt' in self.prev_ci_artifacts[f'ci_results_{job_name}']:
prev_model_failures = self.prev_ci_artifacts[f'ci_results_{job_name}']['model_failures_report.txt']
entries_changed = self.compute_diff_for_failure_reports(model_failures_report, prev_model_failures)
if len(entries_changed) > 0:
diff_report = prepare_reports(title='Changed model modules failures', header=model_header, reports=entries_changed, to_truncate=False)
file_path = os.path.join(os.getcwd(), f'ci_results_{job_name}/changed_model_failures_report.txt')
with open(file_path, 'w', encoding='UTF-8') as fp:
fp.write(diff_report)
diff_report = prepare_reports(title='*Changed model modules failures*', header=model_header, reports=entries_changed)
model_failure_sections.append({'type': 'section', 'text': {'type': 'mrkdwn', 'text': diff_report}})
return model_failure_sections
@property
def additional_failures(self) -> dict:
failures = {k: v['failed'] for k, v in self.additional_results.items()}
errors = {k: v['error'] for k, v in self.additional_results.items()}
individual_reports = []
for key, value in failures.items():
device_report = self.get_device_report(value)
if sum(value.values()) or errors[key]:
report = f'{key}'
if errors[key]:
report = f'[Errored out] {report}'
if device_report:
report = f'{device_report}{report}'
individual_reports.append(report)
header = 'Single | Multi | Category\n'
failures_report = prepare_reports(title='The following non-modeling tests had failures', header=header, reports=individual_reports)
return {'type': 'section', 'text': {'type': 'mrkdwn', 'text': failures_report}}
@property
def payload(self) -> str:
blocks = [self.header]
if self.ci_title:
blocks.append(self.ci_title_section)
if self.n_model_failures > 0 or self.n_additional_failures > 0 or self.n_jobs_errored_out > 0:
blocks.append(self.failures)
if self.n_model_failures > 0:
block = self.category_failures
if block['text']['text']:
blocks.append(block)
for block in self.model_failures:
if block['text']['text']:
blocks.append(block)
if self.n_additional_failures > 0:
blocks.append(self.additional_failures)
if self.n_model_failures == 0 and self.n_additional_failures == 0:
blocks.append(self.no_failures)
if len(self.selected_warnings) > 0:
blocks.append(self.warnings)
new_failure_blocks = []
for idx, (prev_workflow_run_id, prev_ci_artifacts) in enumerate([self.prev_ci_artifacts] + self.other_ci_artifacts):
if idx == 0:
new_failure_blocks = self.get_new_model_failure_blocks(prev_ci_artifacts=prev_ci_artifacts, with_header=False)
extra_blocks = self.get_new_model_failure_blocks(prev_ci_artifacts=prev_ci_artifacts, to_truncate=False)
if extra_blocks:
filename = 'new_failures'
if idx > 0:
filename = f'{filename}_against_{prev_workflow_run_id}'
failure_text = extra_blocks[-1]['text']['text']
file_path = os.path.join(os.getcwd(), f'ci_results_{job_name}/{filename}.txt')
with open(file_path, 'w', encoding='UTF-8') as fp:
fp.write(failure_text)
file_path = os.path.join(os.getcwd(), f'ci_results_{job_name}/{filename}.txt')
_ = api.upload_file(path_or_fileobj=file_path, path_in_repo=f'{report_repo_folder}/ci_results_{job_name}/{filename}.txt', repo_id=report_repo_id, repo_type='dataset', token=os.environ.get('TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN', None))
new_failed_tests = {}
nb_new_failed_tests = 0
for line in failure_text.split():
if 'https://github.com/huggingface/transformers/actions/runs' in line:
pattern = '<(https://github.com/huggingface/transformers/actions/runs/.+?/job/.+?)\\|(.+?)>'
items = re.findall(pattern, line)
elif 'tests/' in line:
if 'tests/models/' in line or ('tests/quantization/' in line and job_name == 'run_quantization_torch_gpu'):
model = line.split('/')[2]
else:
model = line.split('/')[1]
if model not in new_failed_tests:
new_failed_tests[model] = {'single-gpu': [], 'multi-gpu': []}
for _, device in items:
new_failed_tests[model][f'{device}-gpu'].append(line)
nb_new_failed_tests += 1
file_path = os.path.join(os.getcwd(), f'ci_results_{job_name}/{filename}.json')
with open(file_path, 'w', encoding='UTF-8') as fp:
json.dump(new_failed_tests, fp, ensure_ascii=False, indent=4)
file_path = os.path.join(os.getcwd(), f'ci_results_{job_name}/{filename}.json')
commit_info = api.upload_file(path_or_fileobj=file_path, path_in_repo=f'{report_repo_folder}/ci_results_{job_name}/{filename}.json', repo_id=report_repo_id, repo_type='dataset', token=os.environ.get('TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN', None))
new_failures_url = f'https://huggingface.co/datasets/{report_repo_id}/raw/{commit_info.oid}/{report_repo_folder}/ci_results_{job_name}/{filename}.json'
if idx == 0:
block = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': f'*There are {nb_new_failed_tests} new failed tests*\n\n(compared to previous run: <https://github.com/huggingface/transformers/actions/runs/{prev_workflow_run_id}|{prev_workflow_run_id}>)'}, 'accessory': {'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check new failures'}, 'url': new_failures_url}}
blocks.append(block)
else:
block = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': f"*There are {nb_new_failed_tests} failed tests unique to this run*\n\n(compared to{(' Nvidia CI ' if is_scheduled_ci_run else ' ')}run: <https://github.com/huggingface/transformers/actions/runs/{prev_workflow_run_id}|{prev_workflow_run_id}>)"}, 'accessory': {'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check failures'}, 'url': new_failures_url}}
blocks.append(block)
if diff_file_url is not None:
block = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': f'*Test results diff*\n\n(compared to previous run: <https://github.com/huggingface/transformers/actions/runs/{prev_workflow_run_id}|{prev_workflow_run_id}>)'}, 'accessory': {'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check test result diff file'}, 'url': diff_file_url}}
blocks.append(block)
if len(new_failure_blocks) > 0:
blocks.extend(new_failure_blocks)
return json.dumps(blocks)
@staticmethod
def error_out(title, ci_title='', runner_not_available=False, runner_failed=False, setup_failed=False):
blocks = []
title_block = {'type': 'header', 'text': {'type': 'plain_text', 'text': title}}
blocks.append(title_block)
if ci_title:
ci_title_block = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': ci_title}}
blocks.append(ci_title_block)
offline_runners = []
if runner_not_available:
text = '💔 CI runners are not available! Tests are not run. 😭'
result = os.environ.get('OFFLINE_RUNNERS')
if result is not None:
offline_runners = json.loads(result)
elif runner_failed:
text = '💔 CI runners have problems! Tests are not run. 😭'
elif setup_failed:
text = '💔 Setup job failed. Tests are not run. 😭'
else:
text = '💔 There was an issue running the tests. 😭'
error_block_1 = {'type': 'header', 'text': {'type': 'plain_text', 'text': text}}
text = ''
if len(offline_runners) > 0:
text = '\n • ' + '\n • '.join(offline_runners)
text = f'The following runners are offline:\n{text}\n\n'
text += "🙏 Let's fix it ASAP! 🙏"
error_block_2 = {'type': 'section', 'text': {'type': 'plain_text', 'text': text}, 'accessory': {'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}"}}
blocks.extend([error_block_1, error_block_2])
payload = json.dumps(blocks)
print('Sending the following payload')
print(json.dumps({'blocks': blocks}))
client.chat_postMessage(channel=SLACK_REPORT_CHANNEL_ID, text=text, blocks=payload)
def post(self):
payload = self.payload
print('Sending the following payload')
print(json.dumps({'blocks': json.loads(payload)}))
text = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.'
self.thread_ts = client.chat_postMessage(channel=SLACK_REPORT_CHANNEL_ID, blocks=payload, text=text)
def get_reply_blocks(self, job_name, job_result, failures, device, text):
"""
failures: A list with elements of the form {"line": full test name, "trace": error trace}
"""
MAX_ERROR_TEXT = 3000 - len('[Truncated]')
failure_text = ''
for idx, error in enumerate(failures):
new_text = failure_text + f"*{error['line']}*\n_{error['trace']}_\n\n"
if len(new_text) > MAX_ERROR_TEXT:
failure_text = failure_text + '[Truncated]'
break
failure_text = new_text
title = job_name
if device is not None:
title += f' ({device}-gpu)'
content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_result['job_link'] is not None and job_result['job_link'][device] is not None:
content['accessory'] = {'type': 'button', 'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True}, 'url': job_result['job_link'][device]}
return [{'type': 'header', 'text': {'type': 'plain_text', 'text': title.upper(), 'emoji': True}}, content, {'type': 'section', 'text': {'type': 'mrkdwn', 'text': failure_text}}]
def get_new_model_failure_blocks(self, prev_ci_artifacts, with_header=True, to_truncate=True):
if prev_ci_artifacts is None:
return []
if len(self.model_results) > 0:
target_results = self.model_results
else:
target_results = self.additional_results[job_to_test_map[job_name]]
if 'failures' in target_results:
target_results = {job_name: target_results}
sorted_dict = sorted(target_results.items(), key=lambda t: t[0])
job = job_to_test_map[job_name]
prev_model_results = {}
if f'ci_results_{job_name}' in prev_ci_artifacts and f'{test_to_result_name[job]}_results.json' in prev_ci_artifacts[f'ci_results_{job_name}']:
prev_model_results = json.loads(prev_ci_artifacts[f'ci_results_{job_name}'][f'{test_to_result_name[job]}_results.json'])
if 'failures' in prev_model_results:
prev_model_results = {job_name: prev_model_results}
all_failure_lines = {}
for job, job_result in sorted_dict:
if len(job_result['failures']):
devices = sorted(job_result['failures'].keys(), reverse=True)
for device in devices:
failures = job_result['failures'][device]
prev_error_lines = {}
if job in prev_model_results and device in prev_model_results[job]['failures']:
prev_error_lines = {error['line'] for error in prev_model_results[job]['failures'][device]}
url = None
if job_result['job_link'] is not None and job_result['job_link'][device] is not None:
url = job_result['job_link'][device]
for idx, error in enumerate(failures):
if error['line'] in prev_error_lines:
continue
new_text = f"{error['line']}\n\n"
if new_text not in all_failure_lines:
all_failure_lines[new_text] = []
all_failure_lines[new_text].append(f'<{url}|{device}>' if url is not None else device)
MAX_ERROR_TEXT = 3000 - len('[Truncated]') - len('```New failures```\n\n')
if not to_truncate:
MAX_ERROR_TEXT = float('inf')
failure_text = ''
for line, devices in all_failure_lines.items():
new_text = failure_text + f"{'|'.join(devices)} gpu\n{line}"
if len(new_text) > MAX_ERROR_TEXT:
failure_text = failure_text + '[Truncated]'
break
failure_text = new_text
blocks = []
if failure_text:
if with_header:
blocks.append({'type': 'header', 'text': {'type': 'plain_text', 'text': 'New failures', 'emoji': True}})
else:
failure_text = f'{failure_text}'
blocks.append({'type': 'section', 'text': {'type': 'mrkdwn', 'text': failure_text}})
return blocks
def post_reply(self):
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.')
sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0])
for job, job_result in sorted_dict:
if len(job_result['failures']):
for device, failures in job_result['failures'].items():
text = '\n'.join(sorted([f'*{k}*: {v[device]}' for k, v in job_result['failed'].items() if v[device]]))
blocks = self.get_reply_blocks(job, job_result, failures, device, text=text)
print('Sending the following reply')
print(json.dumps({'blocks': blocks}))
client.chat_postMessage(channel=SLACK_REPORT_CHANNEL_ID, text=f'Results for {job}', blocks=blocks, thread_ts=self.thread_ts['ts'])
time.sleep(1)
for job, job_result in self.additional_results.items():
if len(job_result['failures']):
for device, failures in job_result['failures'].items():
blocks = self.get_reply_blocks(job, job_result, failures, device, text=f"Number of failures: {job_result['failed'][device]}")
print('Sending the following reply')
print(json.dumps({'blocks': blocks}))
client.chat_postMessage(channel=SLACK_REPORT_CHANNEL_ID, text=f'Results for {job}', blocks=blocks, thread_ts=self.thread_ts['ts'])
time.sleep(1)
|
class Message:
def __init__(self, title: str, ci_title: str, model_results: dict, additional_results: dict, selected_warnings: Optional[list]=None, prev_ci_artifacts=None, other_ci_artifacts=None):
pass
@property
def time(self) -> str:
pass
@property
def header(self) -> dict:
pass
@property
def ci_title_section(self) -> dict:
pass
@property
def no_failures(self) -> dict:
pass
@property
def failures(self) -> dict:
pass
@property
def warnings(self) -> dict:
pass
@staticmethod
def get_device_report(report, rjust=6):
pass
@property
def category_failures(self) -> dict:
pass
def compute_diff_for_failure_reports(self, curr_failure_report, prev_failure_report):
pass
@property
def model_failures(self) -> list[dict]:
pass
def per_model_sum(model_category_dict):
pass
@property
def additional_failures(self) -> dict:
pass
@property
def payload(self) -> str:
pass
@staticmethod
def error_out(title, ci_title='', runner_not_available=False, runner_failed=False, setup_failed=False):
pass
def post(self):
pass
def get_reply_blocks(self, job_name, job_result, failures, device, text):
'''
failures: A list with elements of the form {"line": full test name, "trace": error trace}
'''
pass
def get_new_model_failure_blocks(self, prev_ci_artifacts, with_header=True, to_truncate=True):
pass
def post_reply(self):
pass
| 32
| 1
| 37
| 5
| 29
| 2
| 6
| 0.07
| 0
| 9
| 0
| 1
| 16
| 20
| 18
| 18
| 722
| 121
| 564
| 178
| 524
| 38
| 355
| 156
| 335
| 17
| 0
| 5
| 107
|
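The `Message` class in record 89 leans on a module-level `dicts_to_sum` helper that sits outside the extracted class; a plausible implementation (an assumption, not the repo's verbatim code) sums per-key counts across several failure dicts:

```python
import functools
from collections import Counter


def dicts_to_sum(objects):
    # Assumed helper: add up the per-key counts of several failure dicts.
    counters = (Counter(o) for o in objects)
    return dict(functools.reduce(lambda a, b: a + b, counters, Counter()))


print(dicts_to_sum([{"single": 1, "multi": 2}, {"single": 3, "unclassified": 1}]))
# {'single': 4, 'multi': 2, 'unclassified': 1}
```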
90
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/benchmarking/plot_csv_file.py
|
plot_csv_file.Plot
|
import matplotlib.pyplot as plt
import numpy as np
import csv
from matplotlib.ticker import ScalarFormatter
from collections import defaultdict
class Plot:
def __init__(self, args):
self.args = args
self.result_dict = defaultdict(lambda: {'bsz': [], 'seq_len': [], 'result': {}})
with open(self.args.csv_file, newline='') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
model_name = row['model']
self.result_dict[model_name]['bsz'].append(int(row['batch_size']))
self.result_dict[model_name]['seq_len'].append(int(row['sequence_length']))
if can_convert_to_int(row['result']):
self.result_dict[model_name]['result'][int(row['batch_size']), int(row['sequence_length'])] = int(row['result'])
elif can_convert_to_float(row['result']):
self.result_dict[model_name]['result'][int(row['batch_size']), int(row['sequence_length'])] = float(row['result'])
def plot(self):
fig, ax = plt.subplots()
title_str = 'Time usage' if self.args.is_time else 'Memory usage'
title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
ax.set_xscale('log')
ax.set_yscale('log')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
for model_name_idx, model_name in enumerate(self.result_dict.keys()):
batch_sizes = sorted(set(self.result_dict[model_name]['bsz']))
sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len']))
results = self.result_dict[model_name]['result']
x_axis_array, inner_loop_array = (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
label_model_name = model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
y_axis_array = np.asarray([results[x, inner_loop_value] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int)
else:
y_axis_array = np.asarray([results[inner_loop_value, x] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32)
x_axis_label, inner_loop_label = ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
x_axis_array = np.asarray(x_axis_array, int)[:len(y_axis_array)]
plt.scatter(x_axis_array, y_axis_array, label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}')
plt.plot(x_axis_array, y_axis_array, '--')
title_str += f' {label_model_name} vs.'
title_str = title_str[:-4]
y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'
plt.title(title_str)
plt.xlabel(x_axis_label)
plt.ylabel(y_axis_label)
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file)
else:
plt.show()
|
class Plot:
def __init__(self, args):
pass
def plot(self):
pass
| 3
| 0
| 42
| 7
| 33
| 3
| 9
| 0.07
| 0
| 5
| 0
| 0
| 2
| 2
| 2
| 2
| 85
| 14
| 67
| 22
| 64
| 5
| 46
| 21
| 43
| 13
| 0
| 3
| 17
|
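`Plot.__init__` in record 90 calls `can_convert_to_int` and `can_convert_to_float`, which are defined elsewhere in plot_csv_file.py; a minimal sketch of what they plausibly do (an assumption):

```python
def can_convert_to_int(value):
    try:
        int(value)
        return True
    except ValueError:
        return False


def can_convert_to_float(value):
    try:
        float(value)
        return True
    except ValueError:
        return False


assert can_convert_to_int("42") and not can_convert_to_int("4.2")
assert can_convert_to_float("4.2")
```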
91
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/benchmarking/plot_csv_file.py
|
plot_csv_file.PlotArguments
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class PlotArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
csv_file: str = field(metadata={'help': 'The csv file to plot.'})
plot_along_batch: bool = field(default=False, metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'})
is_time: bool = field(default=False, metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'})
no_log_scale: bool = field(default=False, metadata={'help': 'Disable logarithmic scale when plotting'})
is_train: bool = field(default=False, metadata={'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'})
figure_png_file: Optional[str] = field(default=None, metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'})
short_model_names: Optional[list[str]] = list_field(default=None, metadata={'help': 'List of model names that are used instead of the ones in the csv file.'})
|
@dataclass
class PlotArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 1
| 29
| 8
| 28
| 3
| 8
| 8
| 7
| 0
| 0
| 0
| 0
|
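`PlotArguments` in record 91 uses a `list_field` helper that the snippet does not define; the usual pattern (an assumption based on how such helpers look elsewhere in the repo) wraps `dataclasses.field` with a `default_factory` so a mutable default is legal:

```python
from dataclasses import field


def list_field(default=None, metadata=None):
    # Assumed helper: dataclasses forbid mutable defaults, so hand the value
    # over through a default_factory instead.
    return field(default_factory=lambda: default, metadata=metadata)
```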
92
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/audio-classification/run_audio_classification.py
|
run_audio_classification.ModelArguments
|
from dataclasses import dataclass, field
import warnings
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(default='facebook/wav2vec2-base', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
feature_extractor_name: Optional[str] = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
freeze_feature_encoder: bool = field(default=True, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
attention_mask: bool = field(default=True, metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
freeze_feature_extractor: Optional[bool] = field(default=None, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
ignore_mismatched_sizes: bool = field(default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'})
def __post_init__(self):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn('The argument `--freeze_feature_extractor` is deprecated and will be removed in a future version. Use `--freeze_feature_encoder` instead. Setting `freeze_feature_encoder==True`.', FutureWarning)
if self.freeze_feature_extractor and (not self.freeze_feature_encoder):
raise ValueError('The argument `--freeze_feature_extractor` is deprecated and should not be used in combination with `--freeze_feature_encoder`. Only make use of `--freeze_feature_encoder`.')
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
def __post_init__(self):
pass
| 3 | 1 | 14 | 0 | 14 | 0 | 3 | 0.05 | 0 | 2 | 0 | 0 | 1 | 0 | 1 | 1 | 69 | 2 | 64 | 13 | 62 | 3 | 17 | 13 | 15 | 3 | 0 | 1 | 3 |
| 93 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/text-classification/run_classification.py | run_classification.DataTrainingArguments |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
do_regression: bool = field(default=None, metadata={'help': 'Whether to do regression instead of classification. If None, will be inferred from the dataset.'})
text_column_names: Optional[str] = field(default=None, metadata={'help': 'The name of the text column in the input dataset or a CSV/JSON file. If not specified, will use the "sentence" column for single/multi-label classification task.'})
text_column_delimiter: Optional[str] = field(default=' ', metadata={'help': 'The delimiter to use to join text columns into a single sentence.'})
train_split_name: Optional[str] = field(default=None, metadata={'help': 'The name of the train split in the input dataset. If not specified, will use the "train" split when do_train is enabled'})
validation_split_name: Optional[str] = field(default=None, metadata={'help': 'The name of the validation split in the input dataset. If not specified, will use the "validation" split when do_eval is enabled'})
test_split_name: Optional[str] = field(default=None, metadata={'help': 'The name of the test split in the input dataset. If not specified, will use the "test" split when do_predict is enabled'})
remove_splits: Optional[str] = field(default=None, metadata={'help': 'The splits to remove from the dataset. Multiple splits should be separated by commas.'})
remove_columns: Optional[str] = field(default=None, metadata={'help': 'The columns to remove from the dataset. Multiple columns should be separated by commas.'})
label_column_name: Optional[str] = field(default=None, metadata={'help': 'The name of the label column in the input dataset or a CSV/JSON file. If not specified, will use the "label" column for single/multi-label classification task'})
max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
shuffle_train_dataset: bool = field(default=False, metadata={'help': 'Whether to shuffle the train dataset or not.'})
shuffle_seed: int = field(default=42, metadata={'help': 'Random seed that will be used to shuffle the train dataset.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
metric_name: Optional[str] = field(default=None, metadata={'help': 'The metric to use for evaluation.'})
train_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the training data.'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the validation data.'})
test_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the test data.'})
def __post_init__(self):
if self.dataset_name is None:
if self.train_file is None or self.validation_file is None:
raise ValueError(' training/validation file or a dataset name.')
train_extension = self.train_file.split('.')[-1]
assert train_extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
validation_extension = self.validation_file.split('.')[-1]
assert validation_extension == train_extension, '`validation_file` should have the same extension (csv or json) as `train_file`.'
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
'''
def __post_init__(self):
pass
| 3 | 1 | 11 | 1 | 10 | 0 | 3 | 0.04 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 146 | 4 | 136 | 28 | 134 | 6 | 33 | 28 | 31 | 3 | 0 | 2 | 3 |
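The docstring of the `DataTrainingArguments` record above notes that `HfArgumentParser` turns the dataclass into argparse arguments. A minimal sketch of that flow, assuming the dataclass above is in scope (the CLI values are illustrative):

from transformers import HfArgumentParser

# Each dataclass field becomes a --flag whose help text comes from metadata['help'].
parser = HfArgumentParser(DataTrainingArguments)
(data_args,) = parser.parse_args_into_dataclasses(
    args=["--dataset_name", "glue", "--max_seq_length", "256"]
)
print(data_args.max_seq_length)  # 256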
| 94 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/contrastive-image-text/run_clip.py | run_clip.DataTrainingArguments |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
data_dir: Optional[str] = field(default=None, metadata={'help': 'The data directory containing input files.'})
image_column: Optional[str] = field(default='image_path', metadata={'help': 'The name of the column in the datasets containing the full image file paths.'})
caption_column: Optional[str] = field(default='caption', metadata={'help': 'The name of the column in the datasets containing the image captions.'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a jsonlines file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file (a jsonlines file).'})
max_seq_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and (self.validation_file is None):
raise ValueError('Need either a dataset name or a training/validation file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3 | 1 | 10 | 0 | 10 | 0 | 4 | 0.04 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 72 | 2 | 67 | 15 | 65 | 3 | 22 | 15 | 20 | 4 | 0 | 2 | 4 |
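To make the `__post_init__` guard in the record above concrete, here is a small hedged demonstration, assuming the `DataTrainingArguments` from run_clip.py is in scope (the file name is illustrative):

try:
    DataTrainingArguments()  # dataset_name, train_file, validation_file all default to None
except ValueError as err:
    print(err)  # Need either a dataset name or a training/validation file.

# A csv/json extension satisfies the assertion; other extensions raise AssertionError.
data_args = DataTrainingArguments(train_file="captions.json")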
| 95 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/contrastive-image-text/run_clip.py | run_clip.ModelArguments |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
freeze_vision_model: bool = field(default=False, metadata={'help': 'Whether to freeze the vision model parameters or not.'})
freeze_text_model: bool = field(default=False, metadata={'help': 'Whether to freeze the text model parameters or not.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.06 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 1 | 47 | 12 | 46 | 3 | 12 | 12 | 11 | 0 | 0 | 0 | 0 |
| 96 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/contrastive-image-text/run_clip.py | run_clip.Transform |
import torch
from torchvision.transforms.functional import InterpolationMode
from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize
class Transform(torch.nn.Module):
def __init__(self, image_size, mean, std):
super().__init__()
self.transforms = torch.nn.Sequential(Resize([image_size], interpolation=InterpolationMode.BICUBIC), CenterCrop(image_size), ConvertImageDtype(torch.float), Normalize(mean, std))
def forward(self, x) -> torch.Tensor:
"""`x` should be an instance of `PIL.Image.Image`"""
with torch.no_grad():
x = self.transforms(x)
return x
|
class Transform(torch.nn.Module):
def __init__(self, image_size, mean, std):
pass
def forward(self, x) -> torch.Tensor:
'''`x` should be an instance of `PIL.Image.Image`'''
pass
| 3 | 1 | 7 | 0 | 6 | 1 | 1 | 0.08 | 1 | 3 | 0 | 0 | 2 | 1 | 2 | 12 | 15 | 1 | 13 | 4 | 10 | 1 | 8 | 4 | 5 | 1 | 1 | 1 | 2 |
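A minimal usage sketch for the `Transform` module above; the image size and normalization statistics below are illustrative (in the script they come from the CLIP image processor):

import torch

transform = Transform(
    image_size=224,
    mean=[0.48145466, 0.4578275, 0.40821073],  # CLIP-style statistics, assumed here
    std=[0.26862954, 0.26130258, 0.27577711],
)
pixel_values = torch.rand(3, 300, 400)  # a float CHW image in [0, 1]
out = transform(pixel_values)
print(out.shape)  # torch.Size([3, 224, 224])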
| 97 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/language-modeling/run_clm.py | run_clm.DataTrainingArguments |
from dataclasses import dataclass, field
from transformers.utils.versions import require_version
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
streaming: bool = field(default=False, metadata={'help': 'Enable streaming mode'})
block_size: Optional[int] = field(default=None, metadata={'help': 'Optional input sequence length after tokenization. The training dataset will be truncated in block of this size for training. Default to the model max input length for single sentence inputs (take into account special tokens).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
validation_split_percentage: Optional[int] = field(default=5, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
keep_linebreaks: bool = field(default=True, metadata={'help': 'Whether to keep line breaks when using TXT files or not.'})
def __post_init__(self):
if self.streaming:
require_version('datasets>=2.0.0', 'The streaming feature requires `datasets>=2.0.0`')
if self.dataset_name is None and self.train_file is None and (self.validation_file is None):
raise ValueError('Need either a dataset name or a training/validation file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json', 'txt'], '`train_file` should be a csv, a json or a txt file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json', 'txt'], '`validation_file` should be a csv, a json or a txt file.'
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3 | 1 | 13 | 1 | 12 | 0 | 5 | 0.04 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 75 | 3 | 69 | 15 | 67 | 3 | 24 | 15 | 22 | 5 | 0 | 2 | 5 |
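The `block_size` help text above describes truncating the tokenized corpus into fixed-size blocks. A toy sketch of that grouping step, loosely following the example script's `group_texts` helper (the function body and input here are illustrative, not the script's exact code):

def group_texts(examples, block_size=4):
    # Concatenate all sequences, then split into equal blocks, dropping the remainder.
    concatenated = {k: sum(examples[k], []) for k in examples}
    total_length = (len(concatenated["input_ids"]) // block_size) * block_size
    result = {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated.items()
    }
    result["labels"] = result["input_ids"].copy()  # causal LM labels mirror the inputs
    return result

print(group_texts({"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}))
# {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]], 'labels': [[1, 2, 3, 4], [5, 6, 7, 8]]}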
| 98 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/language-modeling/run_clm.py | run_clm.ModelArguments |
from dataclasses import dataclass, field
from typing import Optional
from transformers import MODEL_FOR_CAUSAL_LM_MAPPING
# module-level constants in the script: the model types available for training from scratch
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."})
model_type: Optional[str] = field(default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)})
config_overrides: Optional[str] = field(default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
dtype: Optional[str] = field(default=None, metadata={'help': "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the dtype will be automatically derived from the model's weights.", 'choices': ['auto', 'bfloat16', 'float16', 'float32']})
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError("--config_overrides can't be used in combination with --config_name or --model_name_or_path")
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
'''
def __post_init__(self):
pass
| 3 | 1 | 5 | 0 | 5 | 0 | 2 | 0.04 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 88 | 2 | 83 | 14 | 81 | 3 | 16 | 14 | 14 | 2 | 0 | 1 | 2 |
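The `config_overrides` field above accepts a comma-separated `key=value` string; in the script it is applied to a freshly created config via `PretrainedConfig.update_from_string`. A minimal sketch (the gpt2 model type is an illustrative choice):

from transformers import CONFIG_MAPPING

config = CONFIG_MAPPING["gpt2"]()  # fresh config, as when training from scratch
config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")
print(config.n_embd, config.resid_pdrop, config.scale_attn_weights)  # 10 0.2 False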
| 99 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/language-modeling/run_fim.py | run_fim.DataTrainingArguments |
from transformers.utils.versions import require_version
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
streaming: bool = field(default=False, metadata={'help': 'Enable streaming mode'})
block_size: Optional[int] = field(default=None, metadata={'help': 'Optional input sequence length after tokenization. The training dataset will be truncated in block of this size for training. Default to the model max input length for single sentence inputs (take into account special tokens).'})
fim_rate: Optional[float] = field(default=0.5, metadata={'help': 'Optional probability with which the FIM transformation is applied to the example. Default is 0.5. A rate of 1.0 means every example will undergo FIM transformation, while a rate of 0.0 means no example will.'})
fim_spm_rate: Optional[float] = field(default=0.5, metadata={'help': 'Within the examples undergoing FIM transformation, this rate determines the probability of applying the Sentence Permutation Mode (SPM). Default is 0.5. A rate of 1.0 means all FIM transformations will use SPM, while a rate of 0.0 means none will.'})
truncate_or_pad: Optional[bool] = field(default=True, metadata={'help': 'Indicates whether the transformed example should be truncated or padded to maintain the same length as the original example. Default is True. If False, the function will not truncate or pad the examples.'})
fim_prefix_token: Optional[str] = field(default='<fim_prefix>', metadata={'help': "Fill-in-Middle Prefix token. Defaults to '<fim_prefix>'."})
fim_middle_token: Optional[str] = field(default='<fim_middle>', metadata={'help': "Fill-in-Middle Middle token. Defaults to '<fim_middle>'."})
fim_suffix_token: Optional[str] = field(default='<fim_suffix>', metadata={'help': "Fill-in-Middle Suffix token. Defaults to '<fim_suffix>'."})
pad_token: Optional[str] = field(default='<fim_pad>', metadata={'help': "Fill-in-Middle Pad token. Used only when 'truncate_or_pad' is set to True. Defaults to '<fim_pad>'."})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
validation_split_percentage: Optional[int] = field(default=5, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
keep_linebreaks: bool = field(default=True, metadata={'help': 'Whether to keep line breaks when using TXT files or not.'})
def __post_init__(self):
if self.streaming:
require_version('datasets>=2.0.0', 'The streaming feature requires `datasets>=2.0.0`')
if self.dataset_name is None and self.train_file is None and (self.validation_file is None):
raise ValueError('Need either a dataset name or a training/validation file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json', 'txt'], '`train_file` should be a csv, a json or a txt file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json', 'txt'], '`validation_file` should be a csv, a json or a txt file.'
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3 | 1 | 13 | 1 | 12 | 0 | 5 | 0.02 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 127 | 3 | 121 | 22 | 119 | 3 | 31 | 22 | 29 | 5 | 0 | 2 | 5 |
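To make the FIM fields above concrete: with probability `fim_rate` a training example is split into prefix/middle/suffix and re-serialized around the sentinel tokens, using SPM ordering with probability `fim_spm_rate` and PSM ordering otherwise. The helper below is an illustrative sketch of that rearrangement, not the script's exact code:

import numpy as np

def fim_transform(text, rng, fim_rate=0.5, fim_spm_rate=0.5):
    if rng.random() >= fim_rate:
        return text  # leave the example untouched
    lo, hi = sorted(rng.integers(0, len(text), size=2))
    prefix, middle, suffix = text[:lo], text[lo:hi], text[hi:]
    if rng.random() < fim_spm_rate:
        # SPM ordering: suffix is presented before the prefix
        return "<fim_prefix><fim_suffix>" + suffix + "<fim_middle>" + prefix + middle
    # PSM ordering: prefix, then suffix, then the middle to be filled in
    return "<fim_prefix>" + prefix + "<fim_suffix>" + suffix + "<fim_middle>" + middle

rng = np.random.default_rng(0)
print(fim_transform("def add(a, b): return a + b", rng, fim_rate=1.0))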