| text (string, 1–1.02k chars) | class_index (int64, 0–10.8k) | source (string, 85–188 chars) |
|---|---|---|
class WarmUp(schedules.LearningRateSchedule):
"""
Applies a warmup schedule on a given learning rate decay schedule.
Args:
initial_learning_rate (`float`):
The initial learning rate for the schedule after the warmup (so this will be the learning rate at the end
of the warmup... | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
def __init__(
self,
initial_learning_rate: float,
decay_schedule_fn: Callable,
warmup_steps: int,
power: float = 1.0,
name: str = None,
):
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.warmup_steps = warmup_steps
... | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
def __call__(self, step):
with tf.name_scope(self.name or "WarmUp") as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
global_step_float = tf.cast(step, tf.float32)
wa... | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
} | 0 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
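The rows above cover the `WarmUp` schedule from `optimization_tf.py`. As a minimal sketch (assuming `WarmUp` from this module is in scope; the step counts and learning rate are hypothetical), a warmup wrapper is typically combined with a Keras decay schedule like this:

```python
import tensorflow as tf

num_train_steps = 10_000   # hypothetical totals, for illustration only
num_warmup_steps = 1_000
init_lr = 5e-5

# After warmup, decay linearly from init_lr to 0 over the remaining steps.
decay_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=init_lr,
    decay_steps=num_train_steps - num_warmup_steps,
    end_learning_rate=0.0,
)

# WarmUp ramps the rate from 0 to init_lr over the first num_warmup_steps,
# then defers to decay_schedule for every later step.
lr_schedule = WarmUp(
    initial_learning_rate=init_lr,
    decay_schedule_fn=decay_schedule,
    warmup_steps=num_warmup_steps,
)
```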
class AdamWeightDecay(Adam):
"""
Adam enables L2 weight decay and clip_by_global_norm on gradients. Just adding the square of the weights to the
loss function is *not* the correct way of using L2 regularization/weight decay with Adam, since that will interact
with the m and v parameters in strange ways ... | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
Args:
learning_rate (`Union[float, LearningRateSchedule]`, *optional*, defaults to 0.001):
The learning rate to use or a schedule.
beta_1 (`float`, *optional*, defaults to 0.9):
The beta1 parameter in Adam, which is the exponential decay rate for the 1st momentum estimates.
... | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
List of the parameter names (or regex patterns) to apply weight decay to. If none is passed, weight decay is
applied to all parameters by default (unless they are in `exclude_from_weight_decay`).
exclude_from_weight_decay (`List[str]`, *optional*):
List of the parameter names (or re pattern... | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
`learning_rate` instead.
""" | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
def __init__(
self,
learning_rate: Union[float, schedules.LearningRateSchedule] = 0.001,
beta_1: float = 0.9,
beta_2: float = 0.999,
epsilon: float = 1e-7,
amsgrad: bool = False,
weight_decay_rate: float = 0.0,
include_in_weight_decay: Optional[List[str]] ... | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
def _prepare_local(self, var_device, var_dtype, apply_state):
super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
self.weight_decay_rate, name="adam_weight_decay_rate"
)
def _dec... | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
def _get_lr(self, var_device, var_dtype, apply_state):
"""Retrieves the learning rate with the given state."""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
apply_state = apply_state or {}
coefficients = apply_state.get((var_device, var_dtype))
if ... | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
return super(AdamWeightDecay, self)._resour... | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True | 1 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
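A hedged sketch tying the two classes together; `transformers` also ships a `create_optimizer()` helper in the same module that builds this combination in one call. The decay rate and exclusion patterns below are common choices, not prescribed values:

```python
# Decoupled weight decay applied through the optimizer, not the loss;
# LayerNorm weights and biases are conventionally excluded.
optimizer = AdamWeightDecay(
    learning_rate=lr_schedule,   # the WarmUp schedule from the previous sketch
    weight_decay_rate=0.01,
    epsilon=1e-6,
    exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
)
```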
class GradientAccumulator:
"""
Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a
replica context. Gradients will be accumulated locally on each replica and without synchronization. Users should
then call `.gradients`, scale the gradients if requ... | 2 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
@property
def step(self):
"""Number of accumulated steps."""
if self._accum_steps is None:
self._accum_steps = tf.Variable(
tf.constant(0, dtype=tf.int64),
trainable=False,
synchronization=tf.VariableSynchronization.ON_READ,
... | 2 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
def __call__(self, gradients):
"""Accumulates `gradients` on the current replica."""
if not self._gradients:
_ = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(gradient),
... | 2 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
for accum_gradient, gradient in zip(self._gradients, gradients):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(gradient)
self._accum_steps.assign_add(1)
def reset(self):
"""Resets the accumulated gradients on the current replica."""
... | 2 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/optimization_tf.py |
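A minimal accumulation-loop sketch, assuming `model`, `loss_fn`, `optimizer`, and `dataset` are already defined: gradients are summed over several micro-batches, averaged, applied once, and the accumulator is reset.

```python
import tensorflow as tf

accumulator = GradientAccumulator()
accum_steps = 4   # hypothetical number of micro-batches per update

for step, (x, y) in enumerate(dataset):
    with tf.GradientTape() as tape:
        loss = loss_fn(y, model(x, training=True))
    # Accumulate this micro-batch's gradients on the current replica.
    accumulator(tape.gradient(loss, model.trainable_variables))
    if (step + 1) % accum_steps == 0:
        # Average the summed gradients, apply once, then reset.
        grads = [g / tf.cast(accum_steps, g.dtype) for g in accumulator.gradients]
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        accumulator.reset()
```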
class BaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the mode... | 3 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 3 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
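The remaining rows come from `modeling_outputs.py`; these dataclasses are what the PyTorch models return. A hedged sketch of inspecting the documented fields (any encoder checkpoint works, `bert-base-uncased` is only an example):

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")

inputs = tokenizer("Hello world", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True, output_attentions=True)

print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
print(len(outputs.hidden_states))       # embedding output + one entry per layer
print(len(outputs.attentions))          # one entry per layer
```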
class BaseModelOutputWithNoAttention(ModelOutput):
"""
Base class for model's outputs, with potential hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Sequence of hidden-states at the output of the last layer of the model... | 4 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class BaseModelOutputWithPooling(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states. | 5 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state of the fi... | 5 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. | 5 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 5 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class BaseModelOutputWithPoolingAndNoAttention(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Sequence of hidden-states at the ou... | 6 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None | 6 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
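This variant is used by convolutional backbones, where hidden states are feature maps rather than token sequences. A hedged sketch with dummy pixel values (`microsoft/resnet-50` is just an example checkpoint):

```python
import torch
from transformers import AutoModel

backbone = AutoModel.from_pretrained("microsoft/resnet-50")
pixel_values = torch.randn(1, 3, 224, 224)  # dummy image batch

with torch.no_grad():
    out = backbone(pixel_values=pixel_values)

print(out.last_hidden_state.shape)  # (batch_size, num_channels, height, width)
print(out.pooler_output.shape)      # pooled features; exact shape depends on the backbone
```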
class BaseModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs that may also contain past key/values (to speed up sequential decoding).
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the... | 7 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.Flo... | 7 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *o... | 7 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 7 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
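A hedged sketch of what the `past_key_values` cache buys during decoding: after the first forward pass, only the newest token is fed, and the returned hidden state covers just that token (the `(batch_size, 1, hidden_size)` case noted above). `gpt2` is only an example checkpoint.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
lm = AutoModelForCausalLM.from_pretrained("gpt2")

ids = tok("The quick brown", return_tensors="pt").input_ids
with torch.no_grad():
    out = lm(ids, use_cache=True)
past = out.past_key_values                            # per-layer (key, value) cache

next_id = out.logits[:, -1].argmax(-1, keepdim=True)  # greedy next token
with torch.no_grad():
    out = lm(next_id, past_key_values=past, use_cache=True)  # feeds one token only
```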
class BaseModelOutputWithCrossAttentions(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the las... | 8 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 8 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple... | 8 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states. | 9 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state of the fi... | 9 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. | 9 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 9 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
... | 9 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[Tu... | 9 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that may also contain past key/values (to speed up sequential decoding).
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hi... | 10 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.Flo... | 10 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *o... | 10 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 10 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optiona... | 10 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class MoECausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs as well as Mixture of Experts' router hidden
states terms, used to train a MoE model.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
... | 11 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_s... | 11 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
z_loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided):
z_loss for the sparse modules.
aux_loss (`torch.FloatTensor`, *optional*, returned when ... | 11 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
z_loss: torch.FloatTensor = None
au... | 11 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
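A hedged sketch of how the extra MoE terms are typically folded into the training objective; the weighting coefficients below are hypothetical, and whether `loss` already includes them depends on the model.

```python
# `model`, `input_ids`, and `labels` are assumed to be defined.
outputs = model(input_ids, labels=labels)  # a MoECausalLMOutputWithPast

total_loss = outputs.loss
if outputs.z_loss is not None:
    total_loss = total_loss + 1e-3 * outputs.z_loss    # router z-loss term
if outputs.aux_loss is not None:
    total_loss = total_loss + 1e-2 * outputs.aux_loss  # load-balancing term
total_loss.backward()
```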
class MoEModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model... | 12 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 12 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Raw router probabilities computed by MoE routers; these terms are used to compute the auxiliary
loss and the z_loss for Mixture of Experts models.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tupl... | 12 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class MoeModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of t... | 13 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *o... | 13 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 13 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Raw router logits (post-softmax) computed by MoE routers; these terms are used to compute the auxiliary
loss for Mixture of Experts models.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tupl... | 13 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
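A hedged sketch of inspecting routing decisions from such an output; it assumes a Mixtral-style model that accepts `output_router_logits=True` and returns one `(batch_size * sequence_length, num_experts)` tensor per MoE layer.

```python
import torch

outputs = model(input_ids, output_router_logits=True)

first_layer_logits = outputs.router_logits[0]
probs = torch.softmax(first_layer_logits, dim=-1)  # per-token expert distribution
top_expert = probs.argmax(dim=-1)                  # expert chosen for each token
```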
class MoeCausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs with a mixture of experts.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction... | 14 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Raw router logits (post-softmax) computed by MoE routers; these terms are used to compute the auxiliary
loss for Mixture of Experts models.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
... | 14 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_s... | 14 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
aux_loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatT... | 14 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class MoEModelOutputWithPastAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that may also contain past key/values (to speed up sequential decoding) as well as
Mixture of Experts' router hidden states terms, used to train a MoE model.
Args:
last_hidden_state (`torch.FloatTensor`... | 15 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.Flo... | 15 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *o... | 15 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 15 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
router_probs (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or wh... | 15 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
rout... | 15 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class Seq2SeqModelOutput(ModelOutput):
"""
Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of h... | 16 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`... | 16 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each laye... | 16 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden... | 16 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each laye... | 16 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor, ...]]... | 16 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
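A hedged sketch showing why the encoder fields are exposed separately: the encoder pass can be run once and reused across decoder calls. `t5-small` is just an example checkpoint.

```python
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

enc_inputs = tok("translate English to German: Hello", return_tensors="pt")
encoder_outputs = model.get_encoder()(**enc_inputs)   # computed once, reused below

decoder_ids = torch.tensor([[model.config.decoder_start_token_id]])
out = model(encoder_outputs=encoder_outputs, decoder_input_ids=decoder_ids)
print(out.encoder_last_hidden_state.shape)            # the reused encoder states
```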
class Seq2SeqMoEModelOutput(ModelOutput):
"""
Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence o... | 17 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`... | 17 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each laye... | 17 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each laye... | 17 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden... | 17 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each laye... | 17 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
decoder_router_logits: Optional[Tuple[torch.FloatTensor]]... | 17 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class CausalLMOutput(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTens... | 18 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 18 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class CausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.F... | 19 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_s... | 19 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple... | 19 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class CausalLMOutputWithCrossAttentions(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logit... | 20 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 20 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Cross-attention weights after the attention softmax, used to compute the weighted average in the
cross-attention heads.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `torch.FloatTensor`... | 20 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[Tuple[torch.... | 20 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
class SequenceClassifierOutputWithPast(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logit... | 21 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_s... | 21 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple... | 21 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
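A hedged sketch of consuming the `logits` field of a classifier output; the checkpoint is just an example of a fine-tuned sentiment model.

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

name = "distilbert-base-uncased-finetuned-sst-2-english"
tok = AutoTokenizer.from_pretrained(name)
clf = AutoModelForSequenceClassification.from_pretrained(name)

batch = tok("A great movie!", return_tensors="pt")
with torch.no_grad():
    logits = clf(**batch).logits        # (batch_size, num_labels)
pred = logits.argmax(-1).item()
print(clf.config.id2label[pred])        # e.g. "POSITIVE"
```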
class MaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequenc... | 22 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shap... | 22 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
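A hedged sketch of reading a `MaskedLMOutput`: `logits` holds vocabulary scores per position, so the fill for a `[MASK]` token is its argmax.

```python
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
mlm = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")

batch = tok("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = mlm(**batch).logits        # (batch_size, seq_len, vocab_size)

mask_pos = (batch.input_ids == tok.mask_token_id).nonzero()[0, 1]
print(tok.decode([logits[0, mask_pos].argmax().item()]))  # likely "paris"
```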
class Seq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, seque... | 23 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`... | 23 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of ... | 23 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden... | 23 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of sha... | 23 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optio... | 23 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
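A hedged sketch: passing `labels` to a seq2seq LM populates `Seq2SeqLMOutput.loss` directly (reusing `tok` and `model` from the encoder-reuse sketch above).

```python
batch = tok("translate English to German: Hello", return_tensors="pt")
labels = tok("Hallo", return_tensors="pt").input_ids

out = model(**batch, labels=labels)   # decoder inputs are derived from labels
out.loss.backward()                   # cross-entropy over the target vocabulary
print(out.logits.shape)               # (batch_size, target_length, vocab_size)
```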
class Seq2SeqMoEOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequ... | 24 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`... | 24 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_outputs.py |