Skip to content

bert

mindnlp.transformers.models.bert.configuration_bert.BertConfig

Bases: PretrainedConfig

Configuration for BERT-base

Source code in mindnlp\transformers\models\bert\configuration_bert.py
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
class BertConfig(PretrainedConfig):
    """
    Configuration for BERT-base
    """
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Build a BERT configuration object.

        Args:
            vocab_size (int): Vocabulary size. Defaults to 30522.
            hidden_size (int): Hidden-state dimensionality. Defaults to 768.
            num_hidden_layers (int): Number of transformer layers. Defaults to 12.
            num_attention_heads (int): Attention heads per layer. Defaults to 12.
            intermediate_size (int): Feed-forward inner dimension. Defaults to 3072.
            hidden_act (str): Hidden-layer activation name. Defaults to 'gelu'.
            hidden_dropout_prob (float): Dropout applied to hidden states. Defaults to 0.1.
            attention_probs_dropout_prob (float): Dropout applied to attention weights.
                Defaults to 0.1.
            max_position_embeddings (int): Maximum supported sequence length. Defaults to 512.
            type_vocab_size (int): Number of token-type (segment) ids. Defaults to 2.
            initializer_range (float): Std-dev used for weight initialization. Defaults to 0.02.
            layer_norm_eps (float): Epsilon used by LayerNorm. Defaults to 1e-12.
            pad_token_id (int): Padding token id, forwarded to the base class. Defaults to 0.
            position_embedding_type (str): Position-embedding scheme. Defaults to 'absolute'.
            use_cache (bool): Whether key/value caching is enabled. Defaults to True.
            classifier_dropout (float): Dropout for classification heads; None means
                consumers fall back to ``hidden_dropout_prob``. Defaults to None.
            **kwargs: Extra options forwarded to ``PretrainedConfig``.
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Record every option on the instance, mirroring the signature order.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout

mindnlp.transformers.models.bert.configuration_bert.BertConfig.__init__(vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs)

Initialize a BertConfig object with the specified parameters.

PARAMETER DESCRIPTION
self

The object instance.

TYPE: object

vocab_size

The size of the vocabulary. Defaults to 30522.

TYPE: int DEFAULT: 30522

hidden_size

The size of the hidden layers. Defaults to 768.

TYPE: int DEFAULT: 768

num_hidden_layers

The number of hidden layers. Defaults to 12.

TYPE: int DEFAULT: 12

num_attention_heads

The number of attention heads. Defaults to 12.

TYPE: int DEFAULT: 12

intermediate_size

The size of the intermediate layer in the transformer encoder. Defaults to 3072.

TYPE: int DEFAULT: 3072

hidden_act

The activation function for the hidden layers. Defaults to 'gelu'.

TYPE: str DEFAULT: 'gelu'

hidden_dropout_prob

The dropout probability for the hidden layers. Defaults to 0.1.

TYPE: float DEFAULT: 0.1

attention_probs_dropout_prob

The dropout probability for the attention probabilities. Defaults to 0.1.

TYPE: float DEFAULT: 0.1

max_position_embeddings

The maximum position index. Defaults to 512.

TYPE: int DEFAULT: 512

type_vocab_size

The size of the type vocabulary. Defaults to 2.

TYPE: int DEFAULT: 2

initializer_range

The range for weight initialization. Defaults to 0.02.

TYPE: float DEFAULT: 0.02

layer_norm_eps

The epsilon value for layer normalization. Defaults to 1e-12.

TYPE: float DEFAULT: 1e-12

pad_token_id

The token ID for padding. Defaults to 0.

TYPE: int DEFAULT: 0

position_embedding_type

The type of position embeddings. Defaults to 'absolute'.

TYPE: str DEFAULT: 'absolute'

use_cache

Whether to use cache during inference. Defaults to True.

TYPE: bool DEFAULT: True

classifier_dropout

The dropout probability for the classifier layer. Defaults to None.

TYPE: float DEFAULT: None

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
ValueError

If any of the input parameters are invalid or out of range.

Source code in mindnlp\transformers\models\bert\configuration_bert.py
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
def __init__(
    self,
    vocab_size=30522,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=2,
    initializer_range=0.02,
    layer_norm_eps=1e-12,
    pad_token_id=0,
    position_embedding_type="absolute",
    use_cache=True,
    classifier_dropout=None,
    **kwargs,
):
    """Build a BERT configuration object.

    Args:
        vocab_size (int): Vocabulary size. Defaults to 30522.
        hidden_size (int): Hidden-state dimensionality. Defaults to 768.
        num_hidden_layers (int): Number of transformer layers. Defaults to 12.
        num_attention_heads (int): Attention heads per layer. Defaults to 12.
        intermediate_size (int): Feed-forward inner dimension. Defaults to 3072.
        hidden_act (str): Hidden-layer activation name. Defaults to 'gelu'.
        hidden_dropout_prob (float): Dropout applied to hidden states. Defaults to 0.1.
        attention_probs_dropout_prob (float): Dropout applied to attention weights.
            Defaults to 0.1.
        max_position_embeddings (int): Maximum supported sequence length. Defaults to 512.
        type_vocab_size (int): Number of token-type (segment) ids. Defaults to 2.
        initializer_range (float): Std-dev used for weight initialization. Defaults to 0.02.
        layer_norm_eps (float): Epsilon used by LayerNorm. Defaults to 1e-12.
        pad_token_id (int): Padding token id, forwarded to the base class. Defaults to 0.
        position_embedding_type (str): Position-embedding scheme. Defaults to 'absolute'.
        use_cache (bool): Whether key/value caching is enabled. Defaults to True.
        classifier_dropout (float): Dropout for classification heads; None means
            consumers fall back to ``hidden_dropout_prob``. Defaults to None.
        **kwargs: Extra options forwarded to ``PretrainedConfig``.
    """
    super().__init__(pad_token_id=pad_token_id, **kwargs)
    # Record every option on the instance, mirroring the signature order.
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.position_embedding_type = position_embedding_type
    self.use_cache = use_cache
    self.classifier_dropout = classifier_dropout

mindnlp.transformers.models.bert.modeling_bert

MindSpore BERT model.

mindnlp.transformers.models.bert.modeling_bert.BertEmbeddings

Bases: Module

Construct the embeddings from word, position and token_type embeddings.

Source code in mindnlp\transformers\models\bert\modeling_bert.py
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        # Three lookup tables: tokens, positions, and segment (token-type) ids.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm keeps its TensorFlow-style capitalization so that any
        # TensorFlow checkpoint's variable names still match and can be loaded.
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        # Non-persistent buffers: a (1, max_len) row of position ids and a matching
        # all-zero token-type row used when the caller omits token_type_ids.
        self.register_buffer(
            "position_ids", ops.arange(config.max_position_embeddings).broadcast_to((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", ops.zeros(self.position_ids.shape, dtype=mindspore.int64), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        past_key_values_length: int = 0,
    ) -> mindspore.Tensor:
        """Sum word, token-type and (for 'absolute') position embeddings, then LayerNorm + dropout."""
        shape = input_ids.shape if input_ids is not None else inputs_embeds.shape[:-1]
        seq_len = shape[1]

        if position_ids is None:
            # Slice the cached row, offset by the length of any cached prefix.
            position_ids = self.position_ids[:, past_key_values_length : seq_len + past_key_values_length]

        if token_type_ids is None:
            # Fall back to the registered all-zero buffer (keeps traced models working
            # without explicit token_type_ids; see issue #5664), expanded to the batch.
            if hasattr(self, "token_type_ids"):
                token_type_ids = self.token_type_ids[:, :seq_len].broadcast_to((shape[0], seq_len))
            else:
                token_type_ids = ops.zeros(shape, dtype=mindspore.int64)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            embeddings = embeddings + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(embeddings))

mindnlp.transformers.models.bert.modeling_bert.BertForMaskedLM

Bases: BertPreTrainedModel

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
class BertForMaskedLM(BertPreTrainedModel):
    """BERT encoder topped with a masked-language-modeling head."""

    _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]

    def __init__(self, config):
        super().__init__(config)

        if config.is_decoder:
            logger.warning(
                "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        # The pooler is not needed for token-level prediction.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        """Return the MLM decoder projection (used for weight tying)."""
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        """Swap in a new decoder projection and its bias for the MLM head."""
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[mindspore.Tensor], MaskedLMOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project the final hidden states onto the vocabulary.
        scores = self.cls(encoder_outputs[0])

        loss = None
        if labels is not None:
            # CrossEntropyLoss ignores the -100 padding index by default.
            criterion = CrossEntropyLoss()
            loss = criterion(scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            tail = (scores,) + encoder_outputs[2:]
            return tail if loss is None else (loss,) + tail

        return MaskedLMOutput(
            loss=loss,
            logits=scores,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """Append one PAD column to the ids and a zero column to the mask for generation."""
        if self.config.pad_token_id is None:
            raise ValueError("The PAD token should be defined for generation")

        batch_size = input_ids.shape[0]
        # Extend the mask with a zero column, then the ids with a dummy PAD column.
        attention_mask = ops.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        pad_column = ops.full(
            (batch_size, 1), self.config.pad_token_id, dtype=mindspore.int64
        )
        input_ids = ops.cat([input_ids, pad_column], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}

mindnlp.transformers.models.bert.modeling_bert.BertForMaskedLM.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None)

labels (mindspore.Tensor of shape (batch_size, sequence_length), optional): Labels for computing the masked language modeling loss. Indices should be in [-100, 0, ..., config.vocab_size] (see input_ids docstring) Tokens with indices set to -100 are ignored (masked), the loss is only computed for the tokens with labels in [0, ..., config.vocab_size]

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple[mindspore.Tensor], MaskedLMOutput]:
    r"""
    labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
        config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
        loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
    """
    if return_dict is None:
        return_dict = self.config.use_return_dict

    encoder_outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    # Project the final hidden states onto the vocabulary.
    scores = self.cls(encoder_outputs[0])

    loss = None
    if labels is not None:
        # CrossEntropyLoss ignores the -100 padding index by default.
        criterion = CrossEntropyLoss()
        loss = criterion(scores.view(-1, self.config.vocab_size), labels.view(-1))

    if not return_dict:
        tail = (scores,) + encoder_outputs[2:]
        return tail if loss is None else (loss,) + tail

    return MaskedLMOutput(
        loss=loss,
        logits=scores,
        hidden_states=encoder_outputs.hidden_states,
        attentions=encoder_outputs.attentions,
    )

mindnlp.transformers.models.bert.modeling_bert.BertForMultipleChoice

Bases: BertPreTrainedModel

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
class BertForMultipleChoice(BertPreTrainedModel):
    """BERT with a pooled-output linear head producing one logit per answer choice."""

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config)
        # Use the classifier-specific dropout if configured, else the generic one.
        if config.classifier_dropout is not None:
            drop_rate = config.classifier_dropout
        else:
            drop_rate = config.hidden_dropout_prob
        self.dropout = nn.Dropout(drop_rate)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[mindspore.Tensor], MultipleChoiceModelOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch, choices, seq) inputs to (batch * choices, seq) for the encoder.
        def _flatten(t):
            return t.view(-1, t.shape[-1]) if t is not None else None

        input_ids = _flatten(input_ids)
        attention_mask = _flatten(attention_mask)
        token_type_ids = _flatten(token_type_ids)
        position_ids = _flatten(position_ids)
        if inputs_embeds is not None:
            inputs_embeds = inputs_embeds.view(-1, inputs_embeds.shape[-2], inputs_embeds.shape[-1])

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # One score per (batch, choice) pair, then regroup by batch row.
        pooled = self.dropout(encoder_outputs[1])
        choice_logits = self.classifier(pooled).view(-1, num_choices)

        loss = None
        if labels is not None:
            criterion = CrossEntropyLoss()
            loss = criterion(choice_logits, labels)

        if not return_dict:
            tail = (choice_logits,) + encoder_outputs[2:]
            return tail if loss is None else (loss,) + tail

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=choice_logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

mindnlp.transformers.models.bert.modeling_bert.BertForMultipleChoice.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None)

labels (mindspore.Tensor of shape (batch_size,), optional): Labels for computing the multiple choice classification loss. Indices should be in [0, ..., num_choices-1] where num_choices is the size of the second dimension of the input tensors. (See input_ids above)

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple[mindspore.Tensor], MultipleChoiceModelOutput]:
    r"""
    labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
        num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
        `input_ids` above)
    """
    if return_dict is None:
        return_dict = self.config.use_return_dict
    num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

    # Flatten (batch, choices, seq) inputs to (batch * choices, seq) for the encoder.
    def _flatten(t):
        return t.view(-1, t.shape[-1]) if t is not None else None

    input_ids = _flatten(input_ids)
    attention_mask = _flatten(attention_mask)
    token_type_ids = _flatten(token_type_ids)
    position_ids = _flatten(position_ids)
    if inputs_embeds is not None:
        inputs_embeds = inputs_embeds.view(-1, inputs_embeds.shape[-2], inputs_embeds.shape[-1])

    encoder_outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    # One score per (batch, choice) pair, then regroup by batch row.
    pooled = self.dropout(encoder_outputs[1])
    choice_logits = self.classifier(pooled).view(-1, num_choices)

    loss = None
    if labels is not None:
        criterion = CrossEntropyLoss()
        loss = criterion(choice_logits, labels)

    if not return_dict:
        tail = (choice_logits,) + encoder_outputs[2:]
        return tail if loss is None else (loss,) + tail

    return MultipleChoiceModelOutput(
        loss=loss,
        logits=choice_logits,
        hidden_states=encoder_outputs.hidden_states,
        attentions=encoder_outputs.attentions,
    )

mindnlp.transformers.models.bert.modeling_bert.BertForNextSentencePrediction

Bases: BertPreTrainedModel

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT encoder with a next-sentence-prediction (binary classification) head on the pooled output."""

    def __init__(self, config):
        super().__init__(config)

        # Full encoder (with pooler) feeding the 2-way NSP classification head.
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple[mindspore.Tensor], NextSentencePredictorOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:
            `NextSentencePredictorOutput` when `return_dict` is true, otherwise a plain tuple
            (optionally prefixed with the loss).

        Example:

        ```python
        >>> import mindspore
        >>> from mindnlp.transformers import AutoTokenizer, BertForNextSentencePrediction

        >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        >>> model = BertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="ms")

        >>> outputs = model(**encoding, labels=mindspore.Tensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```
        """
        # `next_sentence_label` is the deprecated spelling of `labels`.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
                " `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Score the pooled [CLS] representation: two logits (is-next vs. random).
        seq_relationship_scores = self.cls(encoder_outputs[1])

        next_sentence_loss = None
        if labels is not None:
            next_sentence_loss = CrossEntropyLoss()(
                seq_relationship_scores.view(-1, 2), labels.view(-1)
            )

        if return_dict:
            return NextSentencePredictorOutput(
                loss=next_sentence_loss,
                logits=seq_relationship_scores,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )

        output = (seq_relationship_scores,) + encoder_outputs[2:]
        if next_sentence_loss is not None:
            return (next_sentence_loss,) + output
        return output

mindnlp.transformers.models.bert.modeling_bert.BertForNextSentencePrediction.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs)

labels (mindspore.Tensor of shape (batch_size,), optional): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see input_ids docstring). Indices should be in [0, 1]:

- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.

Returns:

Example:

>>> import mindspore
>>> from mindnlp.transformers import AutoTokenizer, BertForNextSentencePrediction

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = BertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")

>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="ms")

>>> outputs = model(**encoding, labels=mindspore.Tensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
Source code in mindnlp\transformers\models\bert\modeling_bert.py
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    **kwargs,
) -> Union[Tuple[mindspore.Tensor], NextSentencePredictorOutput]:
    r"""
    labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
        (see `input_ids` docstring). Indices should be in `[0, 1]`:

        - 0 indicates sequence B is a continuation of sequence A,
        - 1 indicates sequence B is a random sequence.

    Returns:
        `NextSentencePredictorOutput` when `return_dict` is true, otherwise a tuple.

    Example:

    ```python
    >>> import mindspore
    >>> from mindnlp.transformers import AutoTokenizer, BertForNextSentencePrediction

    >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
    >>> model = BertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")

    >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
    >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
    >>> encoding = tokenizer(prompt, next_sentence, return_tensors="ms")

    >>> outputs = model(**encoding, labels=mindspore.Tensor([1]))
    >>> logits = outputs.logits
    >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
    ```
    """
    if "next_sentence_label" in kwargs:
        # Accept the deprecated keyword and remap it onto `labels`.
        warnings.warn(
            "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
            " `labels` instead.",
            FutureWarning,
        )
        labels = kwargs.pop("next_sentence_label")

    return_dict = self.config.use_return_dict if return_dict is None else return_dict

    bert_outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    # Binary is-next / random-sentence logits from the pooled [CLS] vector.
    scores = self.cls(bert_outputs[1])

    loss = None
    if labels is not None:
        loss = CrossEntropyLoss()(scores.view(-1, 2), labels.view(-1))

    if not return_dict:
        tail = (scores,) + bert_outputs[2:]
        return tail if loss is None else (loss,) + tail

    return NextSentencePredictorOutput(
        loss=loss,
        logits=scores,
        hidden_states=bert_outputs.hidden_states,
        attentions=bert_outputs.attentions,
    )

mindnlp.transformers.models.bert.modeling_bert.BertForPreTraining

Bases: BertPreTrainedModel

Source code in mindnlp\transformers\models\bert\modeling_bert.py
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
class BertForPreTraining(BertPreTrainedModel):
    """BERT with both pre-training heads: masked language modeling and next-sentence prediction."""

    _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]

    def __init__(self, config):
        super().__init__(config)

        # Encoder plus the combined MLM + NSP heads.
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        """Return the MLM decoder layer (used for weight tying with the input embeddings)."""
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        """Replace the MLM decoder and keep its bias in sync."""
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        next_sentence_label: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[mindspore.Tensor], BertForPreTrainingOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        next_sentence_label (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
            pair (see `input_ids` docstring) Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:
            `BertForPreTrainingOutput` when `return_dict` is true, otherwise a tuple.

        Example:

        ```python
        >>> from mindnlp.transformers import AutoTokenizer, BertForPreTraining

        >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        >>> model = BertForPreTraining.from_pretrained("google-bert/bert-base-uncased")

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="ms")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        pooled_output = encoder_outputs[1]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        # The combined loss is computed only when BOTH label sets are supplied.
        total_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(
                prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
            )
            next_sentence_loss = loss_fct(
                seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)
            )
            total_loss = masked_lm_loss + next_sentence_loss

        if return_dict:
            return BertForPreTrainingOutput(
                loss=total_loss,
                prediction_logits=prediction_scores,
                seq_relationship_logits=seq_relationship_score,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )

        output = (prediction_scores, seq_relationship_score) + encoder_outputs[2:]
        if total_loss is not None:
            return (total_loss,) + output
        return output

mindnlp.transformers.models.bert.modeling_bert.BertForPreTraining.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, next_sentence_label=None, output_attentions=None, output_hidden_states=None, return_dict=None)

labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
    Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
    config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
    the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
    Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
    pair (see `input_ids` docstring) Indices should be in `[0, 1]`:

    - 0 indicates sequence B is a continuation of sequence A,
    - 1 indicates sequence B is a random sequence.
kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
    Used to hide legacy arguments that have been deprecated.

Returns:

Example:

>>> from mindnlp.transformers import AutoTokenizer, BertForPreTraining

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = BertForPreTraining.from_pretrained("google-bert/bert-base-uncased")

>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="ms")
>>> outputs = model(**inputs)

>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
Source code in mindnlp\transformers\models\bert\modeling_bert.py
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    next_sentence_label: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple[mindspore.Tensor], BertForPreTrainingOutput]:
    r"""
    labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
        config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
        the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
    next_sentence_label (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
        pair (see `input_ids` docstring) Indices should be in `[0, 1]`:

        - 0 indicates sequence B is a continuation of sequence A,
        - 1 indicates sequence B is a random sequence.

    Returns:
        `BertForPreTrainingOutput` when `return_dict` is true, otherwise a tuple.

    Example:

    ```python
    >>> from mindnlp.transformers import AutoTokenizer, BertForPreTraining

    >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
    >>> model = BertForPreTraining.from_pretrained("google-bert/bert-base-uncased")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="ms")
    >>> outputs = model(**inputs)

    >>> prediction_logits = outputs.prediction_logits
    >>> seq_relationship_logits = outputs.seq_relationship_logits
    ```
    """
    return_dict = self.config.use_return_dict if return_dict is None else return_dict

    bert_outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    # Per-token hidden states feed the MLM head; the pooled vector feeds NSP.
    seq_out, pooled_out = bert_outputs[:2]
    prediction_scores, seq_relationship_score = self.cls(seq_out, pooled_out)

    total_loss = None
    if labels is not None and next_sentence_label is not None:
        # Sum of masked-LM loss and next-sentence loss, matching the original objective.
        ce = CrossEntropyLoss()
        mlm = ce(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        nsp = ce(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
        total_loss = mlm + nsp

    if not return_dict:
        tail = (prediction_scores, seq_relationship_score) + bert_outputs[2:]
        return tail if total_loss is None else (total_loss,) + tail

    return BertForPreTrainingOutput(
        loss=total_loss,
        prediction_logits=prediction_scores,
        seq_relationship_logits=seq_relationship_score,
        hidden_states=bert_outputs.hidden_states,
        attentions=bert_outputs.attentions,
    )

mindnlp.transformers.models.bert.modeling_bert.BertForPreTrainingOutput dataclass

Bases: ModelOutput

Output type of [BertForPreTraining].

PARAMETER DESCRIPTION
loss

Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.

TYPE: *optional*, returned when `labels` is provided, `mindspore.Tensor` of shape `(1,)` DEFAULT: None

prediction_logits

Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).

TYPE: `mindspore.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)` DEFAULT: None

seq_relationship_logits

Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).

TYPE: `mindspore.Tensor` of shape `(batch_size, 2)` DEFAULT: None

hidden_states

Tuple of mindspore.Tensor (one for the output of the embeddings + one for the output of each layer) of shape (batch_size, sequence_length, hidden_size).

Hidden-states of the model at the output of each layer plus the initial embedding outputs.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True` DEFAULT: None

attentions

Tuple of mindspore.Tensor (one for each layer) of shape (batch_size, num_heads, sequence_length, sequence_length).

Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True` DEFAULT: None

Source code in mindnlp\transformers\models\bert\modeling_bert.py
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of [`BertForPreTraining`].

    Args:
        loss (*optional*, returned when `labels` is provided, `mindspore.Tensor` of shape `(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (`mindspore.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`mindspore.Tensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `mindspore.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `mindspore.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # Combined MLM + NSP loss; None when labels were not provided.
    loss: Optional[mindspore.Tensor] = None
    # MLM head logits over the vocabulary.
    prediction_logits: mindspore.Tensor = None
    # NSP head logits (is-next vs. random).
    seq_relationship_logits: mindspore.Tensor = None
    # Per-layer hidden states, present only when requested.
    hidden_states: Optional[Tuple[mindspore.Tensor]] = None
    # Per-layer attention weights, present only when requested.
    attentions: Optional[Tuple[mindspore.Tensor]] = None

mindnlp.transformers.models.bert.modeling_bert.BertForQuestionAnswering

Bases: BertPreTrainedModel

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT with a span-classification head for extractive question answering (SQuAD-style)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # No pooler needed: QA scores are computed from per-token hidden states.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        start_positions: Optional[mindspore.Tensor] = None,
        end_positions: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[mindspore.Tensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project every token's hidden state to (start, end) span logits.
        logits = self.qa_outputs(encoder_outputs[0])
        start_logits, end_logits = ops.split(logits, 1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.shape) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.shape) > 1:
                end_positions = end_positions.squeeze(-1)
            # Out-of-sequence gold positions are clamped onto an ignored index
            # so they contribute nothing to the loss.
            ignored_index = start_logits.shape[1]
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if return_dict:
            return QuestionAnsweringModelOutput(
                loss=total_loss,
                start_logits=start_logits,
                end_logits=end_logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )

        output = (start_logits, end_logits) + encoder_outputs[2:]
        if total_loss is not None:
            return (total_loss,) + output
        return output

mindnlp.transformers.models.bert.modeling_bert.BertForQuestionAnswering.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None)

start_positions (mindspore.Tensor of shape (batch_size,), optional): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (sequence_length). Position outside of the sequence are not taken into account for computing the loss. end_positions (mindspore.Tensor of shape (batch_size,), optional): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (sequence_length). Position outside of the sequence are not taken into account for computing the loss.

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    start_positions: Optional[mindspore.Tensor] = None,
    end_positions: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple[mindspore.Tensor], QuestionAnsweringModelOutput]:
    r"""
    Predict start/end span logits for extractive question answering.

    start_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for the index of the start of the labelled span, used to compute the token
        classification loss. Positions are clamped to `sequence_length`; positions outside
        the sequence do not contribute to the loss.
    end_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for the index of the end of the labelled span, used to compute the token
        classification loss. Positions are clamped to `sequence_length`; positions outside
        the sequence do not contribute to the loss.
    """
    if return_dict is None:
        return_dict = self.config.use_return_dict

    bert_outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    # Project each token's hidden state to a (start, end) logit pair, then
    # separate the pair into two `(batch, seq_len)` tensors.
    token_states = bert_outputs[0]
    span_logits = self.qa_outputs(token_states)
    start_logits, end_logits = ops.split(span_logits, 1, dim=-1)
    start_logits = start_logits.squeeze(-1)
    end_logits = end_logits.squeeze(-1)

    total_loss = None
    if start_positions is not None and end_positions is not None:
        # Multi-device setups may add a trailing dimension to the labels.
        if len(start_positions.shape) > 1:
            start_positions = start_positions.squeeze(-1)
        if len(end_positions.shape) > 1:
            end_positions = end_positions.squeeze(-1)
        # Clamp out-of-range positions to `sequence_length` and tell the loss
        # to ignore that index, so they contribute nothing.
        ignored_index = start_logits.shape[1]
        start_positions = start_positions.clamp(0, ignored_index)
        end_positions = end_positions.clamp(0, ignored_index)

        loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
        start_loss = loss_fct(start_logits, start_positions)
        end_loss = loss_fct(end_logits, end_positions)
        total_loss = (start_loss + end_loss) / 2

    if not return_dict:
        output = (start_logits, end_logits) + bert_outputs[2:]
        if total_loss is not None:
            return (total_loss,) + output
        return output

    return QuestionAnsweringModelOutput(
        loss=total_loss,
        start_logits=start_logits,
        end_logits=end_logits,
        hidden_states=bert_outputs.hidden_states,
        attentions=bert_outputs.attentions,
    )

mindnlp.transformers.models.bert.modeling_bert.BertForSequenceClassification

Bases: BertPreTrainedModel

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
class BertForSequenceClassification(BertPreTrainedModel):
    """BERT with a classification/regression head on the pooled `[CLS]` output."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.bert = BertModel(config)
        # Fall back to the generic hidden dropout when no classifier-specific
        # dropout probability is configured.
        if config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        else:
            classifier_dropout = config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[mindspore.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should
            be in `[0, ..., config.num_labels - 1]`. With `config.num_labels == 1` a
            Mean-Square regression loss is computed; with `config.num_labels > 1` a
            Cross-Entropy classification loss is computed.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled = self.dropout(encoder_outputs[1])
        logits = self.classifier(pooled)

        loss = None
        if labels is not None:
            problem_type = self.config.problem_type
            if problem_type is None:
                # Infer the task from label count and dtype, then cache it on the config.
                if self.num_labels == 1:
                    problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (mindspore.int64, mindspore.int32):
                    problem_type = "single_label_classification"
                else:
                    problem_type = "multi_label_classification"
                self.config.problem_type = problem_type

            if problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif problem_type == "single_label_classification":
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            elif problem_type == "multi_label_classification":
                loss = BCEWithLogitsLoss()(logits, labels)

        if not return_dict:
            output = (logits,) + encoder_outputs[2:]
            if loss is not None:
                return (loss,) + output
            return output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

mindnlp.transformers.models.bert.modeling_bert.BertForSequenceClassification.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None)

labels (mindspore.Tensor of shape (batch_size,), optional): Labels for computing the sequence classification/regression loss. Indices should be in [0, ..., config.num_labels - 1]. If config.num_labels == 1 a regression loss is computed (Mean-Square loss), If config.num_labels > 1 a classification loss is computed (Cross-Entropy).

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple[mindspore.Tensor], SequenceClassifierOutput]:
    r"""
    Classify (or regress on) the pooled sequence representation.

    labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for computing the sequence classification/regression loss. Indices should be
        in `[0, ..., config.num_labels - 1]`. With `config.num_labels == 1` a Mean-Square
        regression loss is computed; with `config.num_labels > 1` a Cross-Entropy
        classification loss is computed.
    """
    if return_dict is None:
        return_dict = self.config.use_return_dict

    encoder_outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    # Classification operates on the pooled `[CLS]` output (index 1).
    pooled = self.dropout(encoder_outputs[1])
    logits = self.classifier(pooled)

    loss = None
    if labels is not None:
        problem_type = self.config.problem_type
        if problem_type is None:
            # Infer the task once from label count/dtype and cache it on the config.
            if self.num_labels == 1:
                problem_type = "regression"
            elif self.num_labels > 1 and labels.dtype in (mindspore.int64, mindspore.int32):
                problem_type = "single_label_classification"
            else:
                problem_type = "multi_label_classification"
            self.config.problem_type = problem_type

        if problem_type == "regression":
            loss_fct = MSELoss()
            if self.num_labels == 1:
                loss = loss_fct(logits.squeeze(), labels.squeeze())
            else:
                loss = loss_fct(logits, labels)
        elif problem_type == "single_label_classification":
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
        elif problem_type == "multi_label_classification":
            loss = BCEWithLogitsLoss()(logits, labels)

    if not return_dict:
        output = (logits,) + encoder_outputs[2:]
        if loss is not None:
            return (loss,) + output
        return output

    return SequenceClassifierOutput(
        loss=loss,
        logits=logits,
        hidden_states=encoder_outputs.hidden_states,
        attentions=encoder_outputs.attentions,
    )

mindnlp.transformers.models.bert.modeling_bert.BertForTokenClassification

Bases: BertPreTrainedModel

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
class BertForTokenClassification(BertPreTrainedModel):
    """BERT with a per-token classification head (e.g. for NER/POS tagging)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Token-level heads do not need the pooled `[CLS]` output.
        self.bert = BertModel(config, add_pooling_layer=False)
        if config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        else:
            classifier_dropout = config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[mindspore.Tensor], TokenClassifierOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token logits come from the full hidden-state sequence.
        token_states = self.dropout(encoder_outputs[0])
        logits = self.classifier(token_states)

        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + encoder_outputs[2:]
            if loss is not None:
                return (loss,) + output
            return output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

mindnlp.transformers.models.bert.modeling_bert.BertForTokenClassification.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None)

labels (mindspore.Tensor of shape (batch_size, sequence_length), optional): Labels for computing the token classification loss. Indices should be in [0, ..., config.num_labels - 1].

Source code in mindnlp\transformers\models\bert\modeling_bert.py
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple[mindspore.Tensor], TokenClassifierOutput]:
    r"""
    Produce one classification logit vector per input token.

    labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the token classification loss. Indices should be in
        `[0, ..., config.num_labels - 1]`.
    """
    if return_dict is None:
        return_dict = self.config.use_return_dict

    encoder_outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    # Dropout then project every token's hidden state to label logits.
    token_states = self.dropout(encoder_outputs[0])
    logits = self.classifier(token_states)

    loss = None
    if labels is not None:
        loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))

    if not return_dict:
        output = (logits,) + encoder_outputs[2:]
        if loss is not None:
            return (loss,) + output
        return output

    return TokenClassifierOutput(
        loss=loss,
        logits=logits,
        hidden_states=encoder_outputs.hidden_states,
        attentions=encoder_outputs.attentions,
    )

mindnlp.transformers.models.bert.modeling_bert.BertLMHeadModel

Bases: BertPreTrainedModel

Source code in mindnlp\transformers\models\bert\modeling_bert.py
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
class BertLMHeadModel(BertPreTrainedModel):
    """BERT with a language-modeling head, used as a (causal) decoder."""

    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")

        # No pooling layer: the LM head consumes the full token sequence.
        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        # The decoder projection doubles as the output embedding matrix.
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        past_key_values: Optional[List[mindspore.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[mindspore.Tensor], CausalLMOutputWithCrossAttentions]:
        r"""
        encoder_hidden_states  (`mindspore.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used
            in the cross-attention when the model is configured as a decoder.
        encoder_attention_mask (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid attending to padding token indices of the encoder input; used in
            the cross-attention when the model is a decoder. Values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for the left-to-right language-modeling (next-token) loss. Indices should
            be in `[-100, 0, ..., config.vocab_size]`; tokens set to `-100` are ignored
            (masked) and the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        past_key_values (`tuple(tuple(mindspore.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Precomputed key/value hidden states of the attention blocks, used to speed up
            decoding. When provided, the caller may pass only the last `decoder_input_ids`
            (shape `(batch_size, 1)`) instead of the full sequence.
        use_cache (`bool`, *optional*):
            If `True`, `past_key_values` are returned for faster decoding (see
            `past_key_values`).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        # Caching is pointless during training with labels.
        if labels is not None:
            use_cache = False

        decoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        prediction_scores = self.cls(decoder_outputs[0])

        lm_loss = None
        if labels is not None:
            # Next-token prediction: score position i against label i+1.
            shifted_scores = prediction_scores[:, :-1, :]
            shifted_labels = labels[:, 1:]
            lm_loss = CrossEntropyLoss()(
                shifted_scores.view(-1, self.config.vocab_size), shifted_labels.view(-1)
            )

        if not return_dict:
            output = (prediction_scores,) + decoder_outputs[2:]
            if lm_loss is not None:
                return (lm_loss,) + output
            return output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=decoder_outputs.past_key_values,
            hidden_states=decoder_outputs.hidden_states,
            attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs
    ):
        # When used as a decoder in an encoder-decoder model, build the
        # attention mask on the fly if the caller did not supply one.
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_ids.shape)

        # Trim decoder_input_ids that the cache already covers.
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]
            if input_ids.shape[1] > past_length:
                # Drop the prefix already processed by the cache.
                input_ids = input_ids[:, past_length:]
            else:
                # Default to old behavior: keep only the final ID.
                input_ids = input_ids[:, -1:]

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }

    def _reorder_cache(self, past_key_values, beam_idx):
        # Reorder every cached key/value tensor along the batch axis to
        # follow the beam-search permutation.
        return tuple(
            tuple(state.index_select(0, beam_idx) for state in layer_past)
            for layer_past in past_key_values
        )

mindnlp.transformers.models.bert.modeling_bert.BertLMHeadModel.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None)

encoder_hidden_states (mindspore.Tensor of shape (batch_size, sequence_length, hidden_size), optional): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (mindspore.Tensor of shape (batch_size, sequence_length), optional): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in [0, 1]:

- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.

labels (mindspore.Tensor of shape (batch_size, sequence_length), optional): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in [-100, 0, ..., config.vocab_size] (see input_ids docstring). Tokens with indices set to -100 are ignored (masked); the loss is only computed for the tokens with labels in [0, ..., config.vocab_size]. past_key_values (tuple(tuple(mindspore.Tensor)) of length config.n_layers with each tuple having 4 tensors of shape (batch_size, num_heads, sequence_length - 1, embed_size_per_head)): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.

use_cache (bool, optional): If set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).

Source code in mindnlp\transformers\models\bert\modeling_bert.py
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    past_key_values: Optional[List[mindspore.Tensor]] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple[mindspore.Tensor], CausalLMOutputWithCrossAttentions]:
    r"""
    Run the BERT encoder and the LM head for causal (left-to-right) language modeling.

    encoder_hidden_states  (`mindspore.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
        Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
        the model is configured as a decoder.
    encoder_attention_mask (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
        the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

        - 1 for tokens that are **not masked**,
        - 0 for tokens that are **masked**.
    labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
        `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
        ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
    past_key_values (`tuple(tuple(mindspore.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
        Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

        If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
        don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
        `decoder_input_ids` of shape `(batch_size, sequence_length)`.
    use_cache (`bool`, *optional*):
        If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
        `past_key_values`).
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
    # Caching key/value states only helps incremental generation; when a loss
    # is being computed (training/evaluation) the cache is not needed.
    if labels is not None:
        use_cache = False

    outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        past_key_values=past_key_values,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    # Token-level hidden states -> vocabulary logits via the LM head.
    sequence_output = outputs[0]
    prediction_scores = self.cls(sequence_output)

    lm_loss = None
    if labels is not None:
        # we are doing next-token prediction; shift prediction scores and input ids by one
        shifted_prediction_scores = prediction_scores[:, :-1, :]
        labels = labels[:, 1:]
        loss_fct = CrossEntropyLoss()
        lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

    if not return_dict:
        # Legacy tuple output: (loss?, logits, *extras-from-base-model).
        output = (prediction_scores,) + outputs[2:]
        return ((lm_loss,) + output) if lm_loss is not None else output

    return CausalLMOutputWithCrossAttentions(
        loss=lm_loss,
        logits=prediction_scores,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
        cross_attentions=outputs.cross_attentions,
    )

mindnlp.transformers.models.bert.modeling_bert.BertModel

Bases: BertPreTrainedModel

The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in Attention is all you need by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

To behave as a decoder the model needs to be initialized with the is_decoder argument of the configuration set to True. To be used in a Seq2Seq model, the model needs to be initialized with both the is_decoder argument and add_cross_attention set to True; an encoder_hidden_states is then expected as an input to the forward pass.

Source code in mindnlp\transformers\models\bert\modeling_bert.py
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
class BertModel(BertPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
    """

    _no_split_modules = ["BertEmbeddings", "BertLayer"]

    def __init__(self, config, add_pooling_layer=True):
        """Build embeddings, encoder, and (optionally) the pooler, then run weight init."""
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)

        # The pooler is only needed for tasks that consume a single sequence-level
        # vector (e.g. classification heads); decoders typically skip it.
        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.attn_implementation = config._attn_implementation
        self.position_embedding_type = config.position_embedding_type

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # Token-embedding table shared with the output head in tied setups.
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        position_ids: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        encoder_hidden_states: Optional[mindspore.Tensor] = None,
        encoder_attention_mask: Optional[mindspore.Tensor] = None,
        past_key_values: Optional[List[mindspore.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[mindspore.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states  (`mindspore.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(mindspore.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Only a decoder can make use of the key/value cache; encoders never cache.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.shape[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape

        # past_key_values_length
        # Number of positions already cached; new position ids start after it.
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if token_type_ids is None:
            # Prefer the model's registered all-zeros buffer (avoids allocating a
            # fresh tensor per call); fall back to explicit zeros otherwise.
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.broadcast_to((batch_size, seq_length))
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = ops.zeros(input_shape, dtype=mindspore.int64)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        if attention_mask is None:
            # Default: attend to every position, including the cached prefix.
            attention_mask = ops.ones((batch_size, seq_length + past_key_values_length))

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.shape
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = ops.ones(encoder_hidden_shape)

            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            # Legacy tuple output: (sequence_output, pooled_output, *extras).
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )

mindnlp.transformers.models.bert.modeling_bert.BertModel.forward(input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None)

encoder_hidden_states (mindspore.Tensor of shape (batch_size, sequence_length, hidden_size), optional): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (mindspore.Tensor of shape (batch_size, sequence_length), optional): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in [0, 1]:

- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.

past_key_values (tuple(tuple(mindspore.Tensor)) of length config.n_layers with each tuple having 4 tensors of shape (batch_size, num_heads, sequence_length - 1, embed_size_per_head)): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.

use_cache (bool, optional): If set to True, past_key_values key value states are returned and can be used to speed up decoding (see past_key_values).

Source code in mindnlp\transformers\models\bert\modeling_bert.py
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    position_ids: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    encoder_hidden_states: Optional[mindspore.Tensor] = None,
    encoder_attention_mask: Optional[mindspore.Tensor] = None,
    past_key_values: Optional[List[mindspore.Tensor]] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple[mindspore.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
    r"""
    Forward pass of the BERT base model (embeddings -> encoder -> optional pooler).

    encoder_hidden_states  (`mindspore.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
        Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
        the model is configured as a decoder.
    encoder_attention_mask (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
        the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

        - 1 for tokens that are **not masked**,
        - 0 for tokens that are **masked**.
    past_key_values (`tuple(tuple(mindspore.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
        Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

        If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
        don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
        `decoder_input_ids` of shape `(batch_size, sequence_length)`.
    use_cache (`bool`, *optional*):
        If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
        `past_key_values`).
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # Only a decoder can make use of the key/value cache; encoders never cache.
    if self.config.is_decoder:
        use_cache = use_cache if use_cache is not None else self.config.use_cache
    else:
        use_cache = False

    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
        input_shape = input_ids.shape
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.shape[:-1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")

    batch_size, seq_length = input_shape

    # past_key_values_length
    # Number of positions already cached; new position ids start after it.
    past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

    if token_type_ids is None:
        # Prefer the model's registered all-zeros buffer (avoids allocating a
        # fresh tensor per call); fall back to explicit zeros otherwise.
        if hasattr(self.embeddings, "token_type_ids"):
            buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
            buffered_token_type_ids_expanded = buffered_token_type_ids.broadcast_to((batch_size, seq_length))
            token_type_ids = buffered_token_type_ids_expanded
        else:
            token_type_ids = ops.zeros(input_shape, dtype=mindspore.int64)

    embedding_output = self.embeddings(
        input_ids=input_ids,
        position_ids=position_ids,
        token_type_ids=token_type_ids,
        inputs_embeds=inputs_embeds,
        past_key_values_length=past_key_values_length,
    )

    if attention_mask is None:
        # Default: attend to every position, including the cached prefix.
        attention_mask = ops.ones((batch_size, seq_length + past_key_values_length))

    # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
    # ourselves in which case we just need to make it broadcastable to all heads.
    extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)

    # If a 2D or 3D attention mask is provided for the cross-attention
    # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
    if self.config.is_decoder and encoder_hidden_states is not None:
        encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.shape
        encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
        if encoder_attention_mask is None:
            encoder_attention_mask = ops.ones(encoder_hidden_shape)

        encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
    else:
        encoder_extended_attention_mask = None

    # Prepare head mask if needed
    # 1.0 in head_mask indicate we keep the head
    # attention_probs has shape bsz x n_heads x N x N
    # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
    # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
    head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

    encoder_outputs = self.encoder(
        embedding_output,
        attention_mask=extended_attention_mask,
        head_mask=head_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_extended_attention_mask,
        past_key_values=past_key_values,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )
    sequence_output = encoder_outputs[0]
    pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

    if not return_dict:
        # Legacy tuple output: (sequence_output, pooled_output, *extras).
        return (sequence_output, pooled_output) + encoder_outputs[1:]

    return BaseModelOutputWithPoolingAndCrossAttentions(
        last_hidden_state=sequence_output,
        pooler_output=pooled_output,
        past_key_values=encoder_outputs.past_key_values,
        hidden_states=encoder_outputs.hidden_states,
        attentions=encoder_outputs.attentions,
        cross_attentions=encoder_outputs.cross_attentions,
    )

mindnlp.transformers.models.bert.modeling_bert.BertPreTrainedModel

Bases: PreTrainedModel

An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.

Source code in mindnlp\transformers\models\bert\modeling_bert.py
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
class BertPreTrainedModel(PreTrainedModel):
    """
    Abstract base class shared by all BERT model variants.

    Wires up the config class, the checkpoint key prefix, and the weight
    initialization scheme used when loading or creating pretrained models.
    """

    config_class = BertConfig
    base_model_prefix = "bert"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights of a single submodule using the BERT scheme."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding token's embedding at exactly zero.
                module.weight[module.padding_idx] = 0
        elif isinstance(module, nn.LayerNorm):
            nn.init.zeros_(module.bias)
            nn.init.ones_(module.weight)

mindnlp.transformers.models.bert.modeling_graph_bert

MindNLP bert model

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertAttention

Bases: Module

Bert Attention

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
class MSBertAttention(nn.Module):
    r"""
    BERT attention block: self-attention followed by the output sub-layer
    (projection, dropout, residual add and layer norm).
    """

    def __init__(self, config, causal, init_cache=False):
        """
        Build the attention block from its two sub-layers.

        Args:
            config (object): Model configuration object.
            causal (bool): Whether self-attention is causally masked.
            init_cache (bool, optional): Whether to initialize the decoding
                cache. Default: False.
        """
        super().__init__()
        self.self = MSBertSelfAttention(config, causal, init_cache)
        self.output = MSBertSelfOutput(config)

    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        """
        Apply self-attention to `hidden_states` and project the result.

        Args:
            hidden_states: Input of shape (batch_size, sequence_length, hidden_size).
            attention_mask: Optional mask; 1 attends to a token, 0 ignores it.
                If omitted, all tokens are attended to.
            head_mask: Optional per-head mask; 1 keeps a head, 0 masks it.
                If omitted, all heads are kept.

        Returns:
            Tuple whose first element is the attention output of shape
            (batch_size, sequence_length, hidden_size); any remaining
            elements are passed through from the self-attention sub-layer.
        """
        self_results = self.self(hidden_states, attention_mask, head_mask)
        projected = self.output(self_results[0], hidden_states)
        return (projected,) + self_results[1:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertAttention.__init__(config, causal, init_cache=False)

Initializes an instance of MSBertAttention.

PARAMETER DESCRIPTION
self

The instance of the class itself.

config

The configuration object containing various settings.

TYPE: object

causal

Flag indicating whether the attention mechanism is causal.

TYPE: bool

init_cache

Flag indicating whether to initialize cache. Default is False.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION

None.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
def __init__(self, config, causal, init_cache=False):
    """
    Build the attention block from its two sub-layers.

    Args:
        config (object): Model configuration object.
        causal (bool): Whether self-attention is causally masked.
        init_cache (bool, optional): Whether to initialize the decoding
            cache. Default: False.
    """
    super().__init__()
    # Self-attention followed by the output (projection + residual) sub-layer.
    self.self = MSBertSelfAttention(config, causal, init_cache)
    self.output = MSBertSelfOutput(config)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertAttention.forward(hidden_states, attention_mask=None, head_mask=None)

Constructs the attention mechanism for a multi-head self-attention layer in MSBertAttention.

PARAMETER DESCRIPTION
self

The instance of the MSBertAttention class.

TYPE: MSBertAttention

hidden_states

The input tensor of shape (batch_size, sequence_length, hidden_size). It represents the sequence of hidden states for each token in the input sequence.

TYPE: Tensor

attention_mask

An optional tensor of shape (batch_size, sequence_length) indicating which tokens should be attended to and which should be ignored. The value 1 indicates to attend to the token, while 0 indicates to ignore it. If not provided, all tokens are attended to.

TYPE: Tensor DEFAULT: None

head_mask

An optional tensor of shape (num_heads,) or (num_layers, num_heads) indicating which heads or layers to mask. 1 indicates to include the head/layer, while 0 indicates to mask it. If not provided, all heads/layers are included.

TYPE: Tensor DEFAULT: None

RETURNS DESCRIPTION

Tuple[torch.Tensor]: A tuple containing:

  • attention_output (torch.Tensor): The output tensor of shape (batch_size, sequence_length, hidden_size), which represents the attended hidden states for each token in the input sequence.
  • self_outputs[1:] (tuple): A tuple of length num_layers containing tensors representing intermediate outputs of the self-attention mechanism.
Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
def forward(self, hidden_states, attention_mask=None, head_mask=None):
    """Apply self-attention, then the attention output projection.

    Args:
        self (MSBertAttention): Instance holding the ``self`` (attention)
            and ``output`` sub-modules.
        hidden_states (Tensor): Input of shape
            (batch_size, sequence_length, hidden_size).
        attention_mask (Tensor, optional): Mask of shape
            (batch_size, sequence_length); 1 keeps a token, 0 ignores it.
            Defaults to None (attend to every token).
        head_mask (Tensor, optional): Mask selecting attention heads;
            1 keeps a head, 0 masks it. Defaults to None (keep all heads).

    Returns:
        tuple: ``(attention_output, *extras)`` where ``attention_output``
        has shape (batch_size, sequence_length, hidden_size) and ``extras``
        are any additional outputs (e.g. attention weights) produced by the
        self-attention sub-module.

    Raises:
        None.
    """
    # Run multi-head self-attention; element 0 is the context tensor,
    # the remainder are optional extras passed through to the caller.
    attn_results = self.self(hidden_states, attention_mask, head_mask)
    # Project the context and combine with the residual input.
    projected = self.output(attn_results[0], hidden_states)
    return (projected,) + attn_results[1:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertEmbeddings

Bases: Module

Embeddings for BERT, include word, position and token_type

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
class MSBertEmbeddings(nn.Module):
    """
    Embeddings for BERT, include word, position and token_type
    """
    def __init__(self, config):
        """Build the embedding tables plus LayerNorm and dropout.

        Args:
            self: The object instance.
            config: Configuration object providing ``vocab_size``,
                ``hidden_size``, ``max_position_embeddings``,
                ``type_vocab_size``, ``layer_norm_eps`` and
                ``hidden_dropout_prob``.

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__()
        # Token-id -> dense-vector lookup tables.
        self.word_embeddings = nn.Embedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings,
            config.hidden_size,
        )
        self.token_type_embeddings = nn.Embedding(
            config.type_vocab_size,
            config.hidden_size,
        )
        # Normalize the summed embeddings, then regularize with dropout.
        self.LayerNorm = nn.LayerNorm(
            (config.hidden_size,), eps=config.layer_norm_eps
        )
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids, position_ids):
        """Sum word, position and token-type embeddings, normalize, drop out.

        Args:
            self (object): The MSBertEmbeddings instance.
            input_ids (tensor): Token ids of the input sequence.
            token_type_ids (tensor): Segment ids distinguishing sentences.
            position_ids (tensor): Position index of each token.

        Returns:
            tensor: The final embedding tensor for the input sequence.

        Raises:
            None.
        """
        combined = (
            self.word_embeddings(input_ids)
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(combined))

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertEmbeddings.__init__(config)

Initializes an instance of the MSBertEmbeddings class.

PARAMETER DESCRIPTION
self

The object instance.

config

An object of the config class containing the configuration parameters for the embeddings.

RETURNS DESCRIPTION

None.

This method initializes the MSBertEmbeddings object by setting up the word embeddings, position embeddings, token type embeddings, layer normalization, and dropout. The configuration parameters are used to determine the size of the embeddings and other properties.

  • The 'word_embeddings' attribute is an instance of the nn.Embedding class, which represents a lookup table for word embeddings. It takes the vocabulary size (config.vocab_size) and hidden size (config.hidden_size) as arguments.
  • The 'position_embeddings' attribute is an instance of the nn.Embedding class, which represents a lookup table for position embeddings. It takes the maximum position embeddings (config.max_position_embeddings) and hidden size (config.hidden_size) as arguments.
  • The 'token_type_embeddings' attribute is an instance of the nn.Embedding class, which represents a lookup table for token type embeddings. It takes the token type vocabulary size (config.type_vocab_size) and hidden size (config.hidden_size) as arguments.
  • The 'LayerNorm' attribute is an instance of the nn.LayerNorm class, which applies layer normalization to the input embeddings. It takes the hidden size (config.hidden_size) and epsilon (config.layer_norm_eps) as arguments.
  • The 'dropout' attribute is an instance of the nn.Dropout class, which applies dropout regularization to the input embeddings. It takes the dropout probability (config.hidden_dropout_prob) as an argument.
Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
def __init__(self, config):
    """Initialize the BERT embedding sub-modules.

    Creates the word, position and token-type embedding tables, the
    LayerNorm applied to their sum, and the dropout regularizer, all
    sized from the given configuration.

    Args:
        self: The object instance.
        config: Configuration object providing ``vocab_size``,
            ``hidden_size``, ``max_position_embeddings``,
            ``type_vocab_size``, ``layer_norm_eps`` and
            ``hidden_dropout_prob``.

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__()
    hidden = config.hidden_size
    # Lookup tables mapping ids to dense vectors of size `hidden`.
    self.word_embeddings = nn.Embedding(config.vocab_size, hidden)
    self.position_embeddings = nn.Embedding(
        config.max_position_embeddings, hidden
    )
    self.token_type_embeddings = nn.Embedding(config.type_vocab_size, hidden)
    # Normalization and dropout applied after the embeddings are summed.
    self.LayerNorm = nn.LayerNorm((hidden,), eps=config.layer_norm_eps)
    self.dropout = nn.Dropout(p=config.hidden_dropout_prob)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertEmbeddings.forward(input_ids, token_type_ids, position_ids)

This method forwards the embeddings for MSBert model.

PARAMETER DESCRIPTION
self

The object instance of MSBertEmbeddings class.

TYPE: object

input_ids

The input tensor containing the token ids for the input sequence.

TYPE: tensor

token_type_ids

The token type ids to distinguish different sentences in the input sequence.

TYPE: tensor

position_ids

The position ids to indicate the position of each token in the input sequence.

TYPE: tensor

RETURNS DESCRIPTION
tensor

The forwarded embeddings for the input sequence represented as a tensor.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
def forward(self, input_ids, token_type_ids, position_ids):
    """Compute the combined BERT input embeddings.

    Args:
        self (object): The MSBertEmbeddings instance.
        input_ids (tensor): Token ids of the input sequence.
        token_type_ids (tensor): Segment ids distinguishing sentences.
        position_ids (tensor): Position index of each token.

    Returns:
        tensor: Normalized, dropout-regularized sum of the word, position
        and token-type embeddings.

    Raises:
        None.
    """
    total = self.word_embeddings(input_ids)
    total = total + self.position_embeddings(position_ids)
    total = total + self.token_type_embeddings(token_type_ids)
    # Normalize the summed embeddings, then apply dropout.
    return self.dropout(self.LayerNorm(total))

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertEncoder

Bases: Module

Bert Encoder

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
class MSBertEncoder(nn.Module):
    r"""
    Bert Encoder
    """
    def __init__(self, config):
        """Build the encoder: a stack of MSBertLayer modules plus output flags.

        Args:
            self (MSBertEncoder): The instance being initialized.
            config: Configuration providing ``output_attentions`` (bool),
                ``output_hidden_states`` (bool) and ``num_hidden_layers`` (int).

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__()
        # Flags selecting which intermediate results forward() returns.
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        # One transformer layer per configured hidden layer.
        self.layer = nn.ModuleList(
            [MSBertLayer(config) for _ in range(config.num_hidden_layers)]
        )

    def _set_recompute(self):
        """Enable gradient recomputation on every layer of the stack.

        Calls each layer's ``recompute()`` so activations are recomputed
        during the backward pass instead of stored, trading compute for
        memory. Intended for internal use within this class.

        Returns:
            None.
        """
        for blk in self.layer:
            blk.recompute()

    def forward(self, hidden_states, attention_mask=None, head_mask=None,
                encoder_hidden_states=None,
                encoder_attention_mask=None):
        """Run the hidden states through the full layer stack.

        Args:
            self: An instance of the MSBertEncoder class.
            hidden_states (Tensor): Input of shape
                (batch_size, sequence_length, hidden_size).
            attention_mask (Tensor, optional): Mask for the inputs; 1 keeps
                a position, 0 ignores it. Defaults to None.
            head_mask (Tensor, optional): Per-layer head masks, indexed by
                layer number. Defaults to None (no masking).
            encoder_hidden_states (Tensor, optional): Cross-attention hidden
                states, forwarded to every layer. Defaults to None.
            encoder_attention_mask (Tensor, optional): Mask for the
                cross-attention hidden states. Defaults to None.

        Returns:
            tuple: ``(hidden_states,)`` extended with the tuple of all
            per-layer hidden states when ``output_hidden_states`` is set,
            and with the tuple of per-layer attentions when
            ``output_attentions`` is set.

        Raises:
            None.
        """
        hidden_trace = ()
        attn_trace = ()
        for idx, blk in enumerate(self.layer):
            if self.output_hidden_states:
                hidden_trace = hidden_trace + (hidden_states,)
            # Select this layer's head mask, if any was supplied.
            per_layer_mask = None if head_mask is None else head_mask[idx]
            blk_out = blk(hidden_states, attention_mask, per_layer_mask,
                          encoder_hidden_states, encoder_attention_mask)
            hidden_states = blk_out[0]
            if self.output_attentions:
                attn_trace = attn_trace + (blk_out[1],)

        if self.output_hidden_states:
            # Include the final hidden states in the trace as well.
            hidden_trace = hidden_trace + (hidden_states,)

        result = (hidden_states,)
        if self.output_hidden_states:
            result = result + (hidden_trace,)
        if self.output_attentions:
            result = result + (attn_trace,)
        return result

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertEncoder.__init__(config)

Initializes an instance of the MSBertEncoder class.

PARAMETER DESCRIPTION
self

The instance of the class itself.

TYPE: MSBertEncoder

config

An object containing the configuration parameters for the MSBertEncoder.

  • output_attentions (bool): Whether to output attention weights.
  • output_hidden_states (bool): Whether to output all hidden states.
  • layer (nn.ModuleList): List of MSBertLayer instances.

RETURNS DESCRIPTION

None.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
def __init__(self, config):
    """Set up the encoder's layer stack and output flags.

    Args:
        self (MSBertEncoder): The instance being initialized.
        config: Configuration providing ``output_attentions`` (bool),
            ``output_hidden_states`` (bool) and ``num_hidden_layers`` (int).

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__()
    # Flags controlling which intermediate results forward() returns.
    self.output_attentions = config.output_attentions
    self.output_hidden_states = config.output_hidden_states
    # One MSBertLayer per hidden layer in the configuration.
    stacked = [MSBertLayer(config) for _ in range(config.num_hidden_layers)]
    self.layer = nn.ModuleList(stacked)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertEncoder.forward(hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None)

Constructs the MSBertEncoder.

PARAMETER DESCRIPTION
self

An instance of the MSBertEncoder class.

hidden_states

The input hidden states of the encoder. Shape: (batch_size, sequence_length, hidden_size)

TYPE: Tensor

attention_mask

The attention mask for the input hidden states. If provided, the attention mask should have the same shape as the hidden states. Each element of the mask should be 0 or 1, where 0 indicates the position is padded/invalid and 1 indicates the position is not padded/valid. Defaults to None.

TYPE: Tensor DEFAULT: None

head_mask

The head mask for the attention mechanism. If provided, the head mask should have the same shape as the number of layers in the encoder. Each element of the mask should be 0 or 1, where 0 indicates the head is masked and 1 indicates the head is not masked. Defaults to None.

TYPE: Tensor DEFAULT: None

encoder_hidden_states

The hidden states of the encoder. Shape: (batch_size, sequence_length, hidden_size) Defaults to None.

TYPE: Tensor DEFAULT: None

encoder_attention_mask

The attention mask for the encoder hidden states. If provided, the attention mask should have the same shape as the encoder hidden states. Each element of the mask should be 0 or 1, where 0 indicates the position is padded/invalid and 1 indicates the position is not padded/valid. Defaults to None.

TYPE: Tensor DEFAULT: None

RETURNS DESCRIPTION
outputs

A tuple containing the following elements:

  • hidden_states (Tensor): The output hidden states of the encoder. Shape: (batch_size, sequence_length, hidden_size)
  • all_hidden_states (Tuple[Tensor]): A tuple of hidden states of all layers. Each element of the tuple has the shape (batch_size, sequence_length, hidden_size). This will be included if the 'output_hidden_states' flag is set to True.
  • all_attentions (Tuple[Tensor]): A tuple of attention scores of all layers. Each element of the tuple has the shape (batch_size, num_heads, sequence_length, sequence_length). This will be included if the 'output_attentions' flag is set to True.

TYPE: Tuple

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
def forward(self, hidden_states, attention_mask=None, head_mask=None,
            encoder_hidden_states=None,
            encoder_attention_mask=None):
    """Run the hidden states through the encoder's full layer stack.

    Args:
        self: An instance of the MSBertEncoder class.
        hidden_states (Tensor): Input of shape
            (batch_size, sequence_length, hidden_size).
        attention_mask (Tensor, optional): Mask for the inputs; 1 keeps a
            position, 0 ignores it. Defaults to None.
        head_mask (Tensor, optional): Per-layer head masks, indexed by
            layer number. Defaults to None (no masking).
        encoder_hidden_states (Tensor, optional): Cross-attention hidden
            states, forwarded to every layer. Defaults to None.
        encoder_attention_mask (Tensor, optional): Mask for the
            cross-attention hidden states. Defaults to None.

    Returns:
        tuple: ``(hidden_states,)`` extended with the tuple of all
        per-layer hidden states when ``output_hidden_states`` is set,
        and with the tuple of per-layer attentions when
        ``output_attentions`` is set.

    Raises:
        None.
    """
    hidden_trace = ()
    attn_trace = ()
    for idx, blk in enumerate(self.layer):
        if self.output_hidden_states:
            hidden_trace = hidden_trace + (hidden_states,)
        # Select this layer's head mask, if any was supplied.
        per_layer_mask = None if head_mask is None else head_mask[idx]
        blk_out = blk(hidden_states, attention_mask, per_layer_mask,
                      encoder_hidden_states, encoder_attention_mask)
        hidden_states = blk_out[0]
        if self.output_attentions:
            attn_trace = attn_trace + (blk_out[1],)

    if self.output_hidden_states:
        # Include the final hidden states in the trace as well.
        hidden_trace = hidden_trace + (hidden_states,)

    result = (hidden_states,)
    if self.output_hidden_states:
        result = result + (hidden_trace,)
    if self.output_attentions:
        result = result + (attn_trace,)
    return result

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertForPretraining

Bases: MSBertPreTrainedModel

Bert For Pretraining

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
class MSBertForPretraining(MSBertPreTrainedModel):
    r"""
    Bert For Pretraining
    """
    def __init__(self, config, *args, **kwargs):
        """Build the BERT backbone and the pretraining heads.

        Args:
            self: The MSBertForPretraining instance.
            config: Model configuration; must provide ``vocab_size`` and the
                settings consumed by MSBertModel / MSBertPreTrainingHeads.

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__(config, *args, **kwargs)
        self.bert = MSBertModel(config)
        self.cls = MSBertPreTrainingHeads(config)
        self.vocab_size = config.vocab_size
        # Tie weights: the MLM decoder shares the word-embedding matrix.
        decoder = self.cls.predictions.decoder
        decoder.weight = self.bert.embeddings.word_embeddings.weight

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        masked_lm_positions=None,
    ):
        """Run the BERT backbone and the pretraining heads.

        Args:
            self (MSBertForPretraining): The model instance.
            input_ids (Tensor): Token ids of the input batch.
            attention_mask (Tensor, optional): Attention mask. Defaults to None.
            token_type_ids (Tensor, optional): Segment ids. Defaults to None.
            position_ids (Tensor, optional): Position ids. Defaults to None.
            head_mask (Tensor, optional): Head mask. Defaults to None.
            masked_lm_positions (optional): Positions of masked tokens scored
                by the MLM head. Defaults to None.

        Returns:
            tuple: ``(prediction_scores, seq_relationship_score, *extras)``
            where ``extras`` are any backbone outputs beyond the sequence
            and pooled outputs.

        Raises:
            None.
        """
        backbone_out = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
        )
        # First two backbone outputs: per-token states and pooled [CLS] state.
        sequence_output = backbone_out[0]
        pooled_output = backbone_out[1]
        mlm_scores, nsp_score = self.cls(
            sequence_output, pooled_output, masked_lm_positions
        )
        # Prepend head outputs to whatever extras the backbone returned.
        return (mlm_scores, nsp_score) + backbone_out[2:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertForPretraining.__init__(config, *args, **kwargs)

init

Initialize the MSBertForPretraining class.

PARAMETER DESCRIPTION
self

The instance of the MSBertForPretraining class.

config

The configuration for the MSBertForPretraining, containing various parameters and settings for model initialization. It should be an instance of the configuration class specific to the MSBertForPretraining model.

RETURNS DESCRIPTION

None.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
def __init__(self, config, *args, **kwargs):
    """Initialize the pretraining model: backbone, heads, and weight tying.

    Args:
        self: The MSBertForPretraining instance.
        config: Model configuration; must provide ``vocab_size`` and the
            settings consumed by MSBertModel / MSBertPreTrainingHeads.

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__(config, *args, **kwargs)
    self.bert = MSBertModel(config)
    self.cls = MSBertPreTrainingHeads(config)
    self.vocab_size = config.vocab_size
    # Tie weights: the MLM decoder shares the word-embedding matrix.
    decoder = self.cls.predictions.decoder
    decoder.weight = self.bert.embeddings.word_embeddings.weight

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertForPretraining.forward(input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, masked_lm_positions=None)

This method forwards the pretraining model for MSBertForPretraining.

PARAMETER DESCRIPTION
self

The instance of the MSBertForPretraining class.

TYPE: MSBertForPretraining

input_ids

The input tensor containing the token ids.

TYPE: Tensor

attention_mask

A tensor representing the attention mask. Default is None.

TYPE: Tensor DEFAULT: None

token_type_ids

A tensor representing the token type ids. Default is None.

TYPE: Tensor DEFAULT: None

position_ids

A tensor representing the position ids. Default is None.

TYPE: Tensor DEFAULT: None

head_mask

A tensor representing the head mask. Default is None.

TYPE: Tensor DEFAULT: None

masked_lm_positions

A list of integer positions of masked language model tokens.

TYPE: List[int] DEFAULT: None

RETURNS DESCRIPTION

Tuple[Tensor, Tensor]: A tuple containing the prediction scores and sequence relationship score.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
def forward(
    self,
    input_ids,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    masked_lm_positions=None,
):
    """
    Run the pretraining forward pass (masked LM + next-sentence prediction).

    Args:
        self (MSBertForPretraining): Model instance; must expose ``bert`` and ``cls``.
        input_ids (Tensor): Tensor of input token ids.
        attention_mask (Tensor, optional): Attention mask. Default: None.
        token_type_ids (Tensor, optional): Token type ids. Default: None.
        position_ids (Tensor, optional): Position ids. Default: None.
        head_mask (Tensor, optional): Attention-head mask. Default: None.
        masked_lm_positions (Tensor, optional): Positions of masked tokens,
            forwarded to the prediction heads. Default: None.

    Returns:
        tuple: ``(prediction_scores, seq_relationship_score, *extras)`` where
        ``extras`` are any additional outputs produced by the backbone model.

    Raises:
        None
    """
    encoder_outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
    )
    sequence_output = encoder_outputs[0]
    pooled_output = encoder_outputs[1]

    # Heads: masked-LM scores over the vocabulary plus the NSP relationship score.
    prediction_scores, seq_relationship_score = self.cls(
        sequence_output, pooled_output, masked_lm_positions
    )

    # Preserve any extra backbone outputs behind the two scores.
    return (prediction_scores, seq_relationship_score) + encoder_outputs[2:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertForSequenceClassification

Bases: MSBertPreTrainedModel

Bert Model for classification tasks

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
class MSBertForSequenceClassification(MSBertPreTrainedModel):
    """Bert model with a pooled-output classification head on top."""
    def __init__(self, config):
        """
        Build the sequence-classification model.

        Args:
            self: The instance being initialized.
            config (object): Model configuration. Must provide ``num_labels``,
                ``hidden_size``, ``hidden_dropout_prob`` and (optionally)
                ``classifier_dropout``.

        Returns:
            None.

        Raises:
            None
        """
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.bert = MSBertModel(config)
        # Fall back to the generic hidden dropout when no classifier-specific
        # dropout probability is configured.
        if config.classifier_dropout is None:
            drop_prob = config.hidden_dropout_prob
        else:
            drop_prob = config.classifier_dropout
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        self.dropout = nn.Dropout(p=drop_prob)

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        **kwargs
    ):
        """
        Compute classification logits from the pooled BERT output.

        Args:
            self: The MSBertForSequenceClassification instance.
            input_ids (Tensor): Indices of the input tokens.
            attention_mask (Tensor, optional): Attention mask. Default: None.
            token_type_ids (Tensor, optional): Token type ids. Default: None.
            position_ids (Tensor, optional): Position ids. Default: None.
            head_mask (Tensor, optional): Attention-head mask. Default: None.

        Returns:
            tuple: ``(logits, *extras)`` — the classification logits followed
            by any additional backbone outputs.

        Raises:
            None
        """
        bert_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
        )
        # Index 1 of the backbone output is the pooled [CLS] representation.
        logits = self.classifier(self.dropout(bert_outputs[1]))
        return (logits,) + bert_outputs[2:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertForSequenceClassification.__init__(config)

Initializes an instance of the MSBertForSequenceClassification class.

PARAMETER DESCRIPTION
self

The instance of the class.

config

A configuration object containing the settings for the model. It should include the following attributes:

  • num_labels (int): The number of labels for sequence classification.
  • classifier_dropout (float, optional): The dropout probability for the classifier layer. If not provided, the value will default to config.hidden_dropout_prob.

TYPE: object

RETURNS DESCRIPTION
None

This method initializes the instance with the provided configuration.

RAISES DESCRIPTION
TypeError

If the config parameter is not provided or is not of the expected type.

ValueError

If the num_labels attribute is not present in the config object.

AttributeError

If the config object does not contain the necessary attributes for model configuration.

RuntimeError

If an error occurs during model initialization.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
def __init__(self, config):
    """
    Set up the sequence-classification model from *config*.

    Args:
        self: The instance being initialized.
        config (object): Model configuration. Must provide ``num_labels``,
            ``hidden_size``, ``hidden_dropout_prob`` and (optionally)
            ``classifier_dropout``; when ``classifier_dropout`` is None the
            value of ``hidden_dropout_prob`` is used for the classifier head.

    Returns:
        None.

    Raises:
        None
    """
    super().__init__(config)
    self.num_labels = config.num_labels
    self.config = config

    self.bert = MSBertModel(config)
    # Prefer the dedicated classifier dropout; otherwise reuse the generic one.
    if config.classifier_dropout is None:
        drop_prob = config.hidden_dropout_prob
    else:
        drop_prob = config.classifier_dropout
    self.classifier = nn.Linear(config.hidden_size, self.num_labels)
    self.dropout = nn.Dropout(p=drop_prob)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertForSequenceClassification.forward(input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, **kwargs)

Constructs the MSBertForSequenceClassification model for a given input.

PARAMETER DESCRIPTION
self

The instance of the MSBertForSequenceClassification class.

TYPE: MSBertForSequenceClassification

input_ids

The input tensor containing the indices of input tokens.

TYPE: Tensor

attention_mask

An optional tensor containing the attention mask for the input.

TYPE: Tensor DEFAULT: None

token_type_ids

An optional tensor containing the token type ids.

TYPE: Tensor DEFAULT: None

position_ids

An optional tensor containing the position ids.

TYPE: Tensor DEFAULT: None

head_mask

An optional tensor containing the head mask.

TYPE: Tensor DEFAULT: None

RETURNS DESCRIPTION
tuple

A tuple containing the logits for the classification and additional outputs from the model.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
def forward(
    self,
    input_ids,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    **kwargs
):
    """
    Compute classification logits from the pooled BERT output.

    Args:
        self (MSBertForSequenceClassification): The model instance.
        input_ids (Tensor): Indices of the input tokens.
        attention_mask (Tensor, optional): Attention mask. Default: None.
        token_type_ids (Tensor, optional): Token type ids. Default: None.
        position_ids (Tensor, optional): Position ids. Default: None.
        head_mask (Tensor, optional): Attention-head mask. Default: None.

    Returns:
        tuple: ``(logits, *extras)`` — the classification logits followed by
        any additional backbone outputs.

    Raises:
        None
    """
    bert_outputs = self.bert(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
    )
    # Index 1 of the backbone output is the pooled [CLS] representation.
    pooled = self.dropout(bert_outputs[1])
    logits = self.classifier(pooled)
    return (logits,) + bert_outputs[2:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertIntermediate

Bases: Module

Bert Intermediate

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
class MSBertIntermediate(nn.Module):
    r"""
    Feed-forward intermediate layer of a BERT block.

    Projects hidden states from ``hidden_size`` to ``intermediate_size`` and
    applies the configured activation function.
    """
    def __init__(self, config):
        """
        Create the dense projection and its activation.

        Args:
            self: The instance being initialized.
            config: Configuration providing ``hidden_size``,
                ``intermediate_size`` and ``hidden_act`` (a key of ``ACT2FN``).

        Returns:
            None.

        Raises:
            KeyError: If ``config.hidden_act`` is not a key of ``ACT2FN``.
        """
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.intermediate_act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        """
        Apply the dense projection followed by the activation.

        Args:
            self: The MSBertIntermediate instance.
            hidden_states (Tensor): Input of shape
                (batch_size, sequence_length, hidden_size).

        Returns:
            Tensor: Activated projection of shape
                (batch_size, sequence_length, intermediate_size) — the dense
                layer changes the last dimension.

        Raises:
            None.
        """
        return self.intermediate_act_fn(self.dense(hidden_states))

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertIntermediate.__init__(config)

Initializes an instance of the MSBertIntermediate class.

PARAMETER DESCRIPTION
self

The instance of the MSBertIntermediate class.

config

An object representing the configuration for the MSBertIntermediate model. It contains the following attributes:

  • hidden_size (int): The size of the hidden layer.
  • intermediate_size (int): The size of the intermediate layer.
  • hidden_act (str): The activation function for the hidden layer.

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
TypeError

If the config parameter is not provided or is not of the correct type.

ValueError

If the config object does not contain the required attributes.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
def __init__(self, config):
    """
    Create the intermediate layer's dense projection and activation.

    Args:
        self: The instance being initialized.
        config: Configuration object providing:

            - hidden_size (int): Input dimension of the dense layer.
            - intermediate_size (int): Output dimension of the dense layer.
            - hidden_act (str): Name of the activation; must be a key of ``ACT2FN``.

    Returns:
        None.

    Raises:
        KeyError: If ``config.hidden_act`` is not a key of ``ACT2FN``.
    """
    super().__init__()
    self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
    self.intermediate_act_fn = ACT2FN[config.hidden_act]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertIntermediate.forward(hidden_states)

Constructs the intermediate layer of the MSBert model.

PARAMETER DESCRIPTION
self

An instance of the MSBertIntermediate class.

hidden_states

The input hidden states. Should be a tensor of shape (batch_size, sequence_length, hidden_size).

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

The output hidden states after passing through the intermediate layer. The dense projection maps hidden_size to intermediate_size, so the last dimension becomes intermediate_size.

This method takes in the input hidden states and applies the intermediate layer transformations. It first passes the hidden states through a dense layer, then applies an activation function. The resulting hidden states are returned as the output.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
def forward(self, hidden_states):
    """
    Apply the intermediate projection and activation.

    Args:
        self: An MSBertIntermediate instance exposing ``dense`` and
            ``intermediate_act_fn``.
        hidden_states (Tensor): Input of shape
            (batch_size, sequence_length, hidden_size).

    Returns:
        Tensor: Output of shape (batch_size, sequence_length,
            intermediate_size) — the dense projection changes the last
            dimension before the activation is applied.

    Raises:
        None.
    """
    return self.intermediate_act_fn(self.dense(hidden_states))

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertLMPredictionHead

Bases: Module

Bert LM Prediction Head

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
class MSBertLMPredictionHead(nn.Module):
    r"""
    Masked-LM prediction head: transforms hidden states, projects them onto
    the vocabulary with a (weight-tied) decoder, and adds an output bias.
    """
    def __init__(self, config):
        """
        Build the prediction head.

        Args:
            self: The instance being initialized.
            config: Model configuration providing ``hidden_size`` and
                ``vocab_size``.

        Returns:
            None

        Raises:
            None
        """
        super().__init__()
        self.transform = MSBertPredictionHeadTransform(config)

        # The decoder weight is tied to the input embeddings by the caller;
        # only a standalone per-token output bias is created here.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = Parameter(initializer("zeros", config.vocab_size), "bias")

    def forward(self, hidden_states, masked_lm_positions):
        """
        Compute vocabulary logits, optionally restricted to masked positions.

        Args:
            self: The MSBertLMPredictionHead instance.
            hidden_states (Tensor): Hidden states of shape
                (batch_size, seq_len, hidden_size).
            masked_lm_positions (Tensor or None): Per-example positions of the
                masked tokens; when None, every position is scored.

        Returns:
            Tensor: Vocabulary logits. The decoder projects hidden_size to
                vocab_size; when masked positions are given, the batch and
                sequence axes are flattened down to the gathered slots.

        Raises:
            None.
        """
        batch_size, seq_len, hidden_size = hidden_states.shape
        if masked_lm_positions is not None:
            # Convert per-example positions into indices of the flattened
            # (batch*seq, hidden) view, then gather only the masked slots.
            row_offsets = (ops.arange(batch_size) * seq_len).reshape(-1, 1)
            flat_positions = (masked_lm_positions + row_offsets).reshape(-1)
            hidden_states = ops.gather(
                hidden_states.reshape(-1, hidden_size), flat_positions, 0
            )
        transformed = self.transform(hidden_states)
        return self.decoder(transformed) + self.bias

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertLMPredictionHead.__init__(config)

Initializes an instance of the MSBertLMPredictionHead class.

PARAMETER DESCRIPTION
self

The object instance.

config

An instance of the configuration class that contains the model's configuration settings.

  • Type: Any
  • Purpose: This parameter is used to configure the MSBertLMPredictionHead instance.
  • Restrictions: None

RETURNS DESCRIPTION

None

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
def __init__(self, config):
    """
    Build the masked-LM prediction head.

    Args:
        self: The instance being initialized.
        config: Model configuration; must provide ``hidden_size`` and
            ``vocab_size``.

    Returns:
        None

    Raises:
        None
    """
    super().__init__()
    self.transform = MSBertPredictionHeadTransform(config)

    # The decoder weight is tied to the input embeddings by the caller;
    # only a standalone per-token output bias is created here.
    self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
    self.bias = Parameter(initializer("zeros", config.vocab_size), "bias")

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertLMPredictionHead.forward(hidden_states, masked_lm_positions)

Constructs the MSBertLMPredictionHead.

This method takes in the hidden states and masked language model positions, and applies a series of operations to compute the final hidden states for the MSBertLMPredictionHead. The resulting hidden states are then transformed and decoded to produce the final output.

PARAMETER DESCRIPTION
self

An instance of the MSBertLMPredictionHead class.

TYPE: MSBertLMPredictionHead

hidden_states

A tensor of shape (batch_size, seq_len, hidden_size) containing the hidden states.

TYPE: Tensor

masked_lm_positions

A tensor of shape (batch_size, num_masked_lm_positions) containing the positions of the masked language model tokens. If None, no masking is applied.

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

A tensor of vocabulary logits: the decoder projects hidden_size to vocab_size, so the shape is (batch_size, seq_len, vocab_size), or a flattened (total_masked_positions, vocab_size) when masked_lm_positions is given.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
def forward(self, hidden_states, masked_lm_positions):
    """
    Compute vocabulary logits, optionally restricted to masked positions.

    Args:
        self (MSBertLMPredictionHead): Instance exposing ``transform``,
            ``decoder`` and ``bias``.
        hidden_states (Tensor): Hidden states of shape
            (batch_size, seq_len, hidden_size).
        masked_lm_positions (Tensor or None): Per-example positions of the
            masked tokens; when None, every position is scored.

    Returns:
        Tensor: Vocabulary logits. The decoder projects hidden_size to
            vocab_size; when masked positions are given, the batch and
            sequence axes are flattened down to the gathered slots.

    Raises:
        None.
    """
    batch_size, seq_len, hidden_size = hidden_states.shape
    if masked_lm_positions is not None:
        # Convert per-example positions into indices of the flattened
        # (batch*seq, hidden) view, then gather only the masked slots.
        row_offsets = (ops.arange(batch_size) * seq_len).reshape(-1, 1)
        flat_positions = (masked_lm_positions + row_offsets).reshape(-1)
        hidden_states = ops.gather(
            hidden_states.reshape(-1, hidden_size), flat_positions, 0
        )
    transformed = self.transform(hidden_states)
    return self.decoder(transformed) + self.bias

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertLayer

Bases: Module

Bert Layer

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
class MSBertLayer(nn.Module):
    r"""
    A single BERT transformer layer: self-attention, optional
    cross-attention, and a position-wise feed-forward sub-layer.
    """
    def __init__(self, config, init_cache=False):
        """
        Build the sub-layers of one transformer block.

        Args:
            self: The instance being initialized.
            config (object): Model configuration; ``is_decoder`` selects
                causal self-attention, and ``add_cross_attention`` adds a
                cross-attention sub-layer.
            init_cache (bool, optional): Whether the attention modules should
                initialize their cache. Defaults to False.

        Returns:
            None

        Raises:
            None
        """
        super().__init__()
        self.attention = MSBertAttention(config, causal=config.is_decoder, init_cache=init_cache)
        self.intermediate = MSBertIntermediate(config)
        self.output = MSBertOutput(config)
        if config.add_cross_attention:
            self.crossattention = MSBertAttention(config, causal=False, init_cache=init_cache)

    def forward(self, hidden_states, attention_mask=None, head_mask=None,
                encoder_hidden_states=None, encoder_attention_mask=None):
        """
        Run one transformer block over *hidden_states*.

        Args:
            self: The MSBertLayer instance.
            hidden_states: Input tensor of shape
                (batch_size, sequence_length, hidden_size).
            attention_mask: Optional self-attention mask. Defaults to None.
            head_mask: Optional attention-head mask. Defaults to None.
            encoder_hidden_states: Optional encoder states; when given, the
                cross-attention sub-layer is executed. Defaults to None.
            encoder_attention_mask: Optional mask used by cross-attention.
                Defaults to None.

        Returns:
            tuple: ``(layer_output, *extra_attention_outputs)``.

        Raises:
            None.
        """
        self_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attention_output = self_outputs[0]

        if encoder_hidden_states is not None:
            # NOTE(review): encoder_hidden_states only gates this branch; it
            # is not passed to self.crossattention — confirm this matches
            # MSBertAttention's cross-attention interface.
            attention_output = self.crossattention(
                attention_output,
                attention_mask=encoder_attention_mask,
                head_mask=head_mask,
            )[0]

        ffn_hidden = self.intermediate(attention_output)
        layer_output = self.output(ffn_hidden, attention_output)
        return (layer_output,) + self_outputs[1:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertLayer.__init__(config, init_cache=False)

Initializes an instance of the MSBertLayer class.

PARAMETER DESCRIPTION
self

The instance of the class.

config

The configuration object containing various settings and parameters.

TYPE: object

init_cache

Whether to initialize the cache. Defaults to False.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION

None

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
def __init__(self, config, init_cache=False):
    """
    Build the sub-layers of one BERT transformer block.

    Args:
        self: The instance being initialized.
        config (object): Model configuration; ``is_decoder`` selects causal
            self-attention, and ``add_cross_attention`` adds a cross-attention
            sub-layer.
        init_cache (bool, optional): Whether the attention modules should
            initialize their cache. Defaults to False.

    Returns:
        None

    Raises:
        None
    """
    super().__init__()
    self.attention = MSBertAttention(config, causal=config.is_decoder, init_cache=init_cache)
    self.intermediate = MSBertIntermediate(config)
    self.output = MSBertOutput(config)
    if config.add_cross_attention:
        self.crossattention = MSBertAttention(config, causal=False, init_cache=init_cache)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertLayer.forward(hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None)

Constructs the MSBertLayer.

PARAMETER DESCRIPTION
self

The instance of the MSBertLayer class.

hidden_states

The input hidden states (tensor) of shape (batch_size, sequence_length, hidden_size).

attention_mask

Optional attention mask (tensor) of shape (batch_size, sequence_length) or (batch_size, 1, 1, sequence_length). Defaults to None.

DEFAULT: None

head_mask

Optional head mask (tensor) of shape (num_heads,) or (num_layers, num_heads). Defaults to None.

DEFAULT: None

encoder_hidden_states

Optional encoder hidden states (tensor) of shape (batch_size, sequence_length, hidden_size). Defaults to None.

DEFAULT: None

encoder_attention_mask

Optional encoder attention mask (tensor) of shape (batch_size, sequence_length) or (batch_size, 1, 1, sequence_length). Defaults to None.

DEFAULT: None

RETURNS DESCRIPTION
tuple

A tuple containing the layer output (tensor) of shape (batch_size, sequence_length, hidden_size) and any additional attention outputs.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
def forward(self, hidden_states, attention_mask=None, head_mask=None,
            encoder_hidden_states = None,
            encoder_attention_mask = None):
    """
    Run one transformer layer: self-attention, optional cross-attention,
    then the feed-forward sub-block with its residual path.

    Args:
        self: The MSBertLayer instance.
        hidden_states: Input hidden states of shape (batch_size, sequence_length, hidden_size).
        attention_mask: Optional self-attention mask. Defaults to None.
        head_mask: Optional per-head attention mask. Defaults to None.
        encoder_hidden_states: Optional encoder states; when not None the
            cross-attention sub-block is executed. Defaults to None.
        encoder_attention_mask: Optional mask used by the cross-attention
            step. Defaults to None.

    Returns:
        tuple: The layer output of shape (batch_size, sequence_length, hidden_size),
            followed by any extra self-attention outputs.

    Raises:
        None.
    """
    self_attn_outputs = self.attention(hidden_states, attention_mask, head_mask)
    attn_output = self_attn_outputs[0]

    # Cross-attention block (decoder-style usage only).
    # NOTE(review): encoder_hidden_states only gates this branch; the
    # crossattention call receives the encoder mask but not the encoder
    # states themselves — confirm this matches MSBertAttention's contract.
    if encoder_hidden_states is not None:
        cross_outputs = self.crossattention(
            attn_output,
            attention_mask=encoder_attention_mask,
            head_mask=head_mask,
        )
        attn_output = cross_outputs[0]

    ffn_hidden = self.intermediate(attn_output)
    layer_output = self.output(ffn_hidden, attn_output)
    # Extra attention outputs (if any) come from the self-attention step.
    return (layer_output,) + self_attn_outputs[1:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertModel

Bases: MSBertPreTrainedModel

Bert Model

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
class MSBertModel(MSBertPreTrainedModel):
    r"""
    Bert Model
    """
    def __init__(self, config, add_pooling_layer=True):
        """
        Build the BERT backbone: embedding layer, encoder stack and, when
        requested, a pooler head.

        Args:
            self (MSBertModel): This model instance.
            config (object): Model configuration object.
            add_pooling_layer (bool): When True, attach an MSBertPooler head.

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__(config)
        self.num_hidden_layers = config.num_hidden_layers
        self.embeddings = MSBertEmbeddings(config)
        self.encoder = MSBertEncoder(config)
        self.pooler = MSBertPooler(config) if add_pooling_layer else None

    def get_input_embeddings(self):
        """
        Return the model's input embeddings.

        Args:
            self: The MSBertModel instance.

        Returns:
            The word-embedding layer stored on ``self.embeddings``.

        Raises:
            None.
        """
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        """
        Install ``new_embeddings`` as the model's word embeddings.

        Args:
            self (MSBertModel): The model instance.
            new_embeddings (object): The replacement embedding layer.

        Returns:
            None.

        Raises:
            None.
        """
        self.embeddings.word_embeddings = new_embeddings

    def forward(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        encoder_hidden_states = None,
        encoder_attention_mask = None
    ):
        """
        Run the full BERT backbone: embeddings, encoder and optional pooler.

        Args:
            self: The MSBertModel instance.
            input_ids (Tensor): Token ids of the input sequence.
            attention_mask (Tensor, optional): Mask selecting which tokens may
                be attended to. Defaults to all ones.
            token_type_ids (Tensor, optional): Segment ids. Defaults to zeros.
            position_ids (Tensor, optional): Position ids. Defaults to
                0..seq_len-1 broadcast to the shape of ``input_ids``.
            head_mask (Tensor, optional): Per-head attention mask; a 1-D mask
                is broadcast over all layers, a 2-D mask is expanded per layer.
            encoder_hidden_states (Tensor, optional): Encoder states for
                cross-attention.
            encoder_attention_mask (Tensor, optional): Mask for the encoder
                states.

        Returns:
            Tuple: ``(sequence_output, pooled_output)`` followed by any extra
                encoder outputs (hidden states / attentions). ``pooled_output``
                is None when the model was built without a pooler.

        Raises:
            ValueError: If the head_mask dimensions are incompatible.
        """
        # Fill in default masks/ids matching the shape of input_ids.
        if attention_mask is None:
            attention_mask = ops.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = ops.zeros_like(input_ids)
        if position_ids is None:
            position_ids = ops.broadcast_to(
                ops.arange(ops.atleast_2d(input_ids).shape[-1]), input_ids.shape
            )

        # Normalize head_mask to one entry per encoder layer.
        if head_mask is None:
            head_mask = [None] * self.num_hidden_layers
        elif head_mask.ndim == 1:
            head_mask = (
                head_mask.expand_dims(0)
                .expand_dims(0)
                .expand_dims(-1)
                .expand_dims(-1)
            )
            head_mask = ops.broadcast_to(
                head_mask, (self.num_hidden_layers, -1, -1, -1, -1)
            )
        elif head_mask.ndim == 2:
            head_mask = head_mask.expand_dims(1).expand_dims(-1).expand_dims(-1)

        embedding_output = self.embeddings(
            input_ids, position_ids=position_ids, token_type_ids=token_type_ids
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = None if self.pooler is None else self.pooler(sequence_output)

        # (sequence_output, pooled_output, hidden_states?, attentions?)
        return (sequence_output, pooled_output) + encoder_outputs[1:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertModel.__init__(config, add_pooling_layer=True)

Initializes the MSBertModel class with the provided configuration and optional pooling layer.

PARAMETER DESCRIPTION
self

The current instance of the MSBertModel class.

TYPE: MSBertModel

config

The configuration object containing settings for the model.

TYPE: object

add_pooling_layer

Flag indicating whether to add a pooling layer to the model.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION

None.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
def __init__(self, config, add_pooling_layer=True):
    """
    Build the BERT backbone: embedding layer, encoder stack and, when
    requested, a pooler head.

    Args:
        self (MSBertModel): This model instance.
        config (object): Model configuration object.
        add_pooling_layer (bool): When True, attach an MSBertPooler head.

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__(config)
    self.num_hidden_layers = config.num_hidden_layers
    self.embeddings = MSBertEmbeddings(config)
    self.encoder = MSBertEncoder(config)
    self.pooler = MSBertPooler(config) if add_pooling_layer else None

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertModel.forward(input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None)

Construct method in the MSBertModel class.

PARAMETER DESCRIPTION
self

MSBertModel object.

input_ids

The input tensor containing the token ids for the input sequence.

TYPE: Tensor

attention_mask

A mask tensor indicating which tokens should be attended to and which should be ignored.

TYPE: Tensor DEFAULT: None

token_type_ids

A tensor indicating the token types for each token in the input sequence.

TYPE: Tensor DEFAULT: None

position_ids

A tensor specifying the position ids for each token in the input sequence.

TYPE: Tensor DEFAULT: None

head_mask

A mask tensor applied to the attention scores in the self-attention mechanism.

TYPE: Tensor DEFAULT: None

encoder_hidden_states

Hidden states from the encoder.

TYPE: Tensor DEFAULT: None

encoder_attention_mask

A mask tensor indicating which encoder tokens should be attended to in the self-attention mechanism.

TYPE: Tensor DEFAULT: None

RETURNS DESCRIPTION
Tuple

A tuple containing the following:

  • sequence_output (Tensor): The output tensor from the encoder for each token in the input sequence.
  • pooled_output (Tensor): The pooled output tensor from the pooler layer, if available.
  • Additional encoder outputs.
RAISES DESCRIPTION
ValueError

If the dimensions of the head_mask tensor are incompatible.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
def forward(
    self,
    input_ids,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    encoder_hidden_states = None,
    encoder_attention_mask = None
):
    """
    Run the full BERT backbone: embeddings, encoder and optional pooler.

    Args:
        self: The MSBertModel instance.
        input_ids (Tensor): Token ids of the input sequence.
        attention_mask (Tensor, optional): Mask selecting which tokens may be
            attended to. Defaults to all ones.
        token_type_ids (Tensor, optional): Segment ids. Defaults to zeros.
        position_ids (Tensor, optional): Position ids. Defaults to
            0..seq_len-1 broadcast to the shape of ``input_ids``.
        head_mask (Tensor, optional): Per-head attention mask; a 1-D mask is
            broadcast over all layers, a 2-D mask is expanded per layer.
        encoder_hidden_states (Tensor, optional): Encoder states for
            cross-attention.
        encoder_attention_mask (Tensor, optional): Mask for the encoder states.

    Returns:
        Tuple: ``(sequence_output, pooled_output)`` followed by any extra
            encoder outputs (hidden states / attentions). ``pooled_output`` is
            None when the model was built without a pooler.

    Raises:
        ValueError: If the head_mask dimensions are incompatible.
    """
    # Fill in default masks/ids matching the shape of input_ids.
    if attention_mask is None:
        attention_mask = ops.ones_like(input_ids)
    if token_type_ids is None:
        token_type_ids = ops.zeros_like(input_ids)
    if position_ids is None:
        position_ids = ops.broadcast_to(
            ops.arange(ops.atleast_2d(input_ids).shape[-1]), input_ids.shape
        )

    # Normalize head_mask to one entry per encoder layer.
    if head_mask is None:
        head_mask = [None] * self.num_hidden_layers
    elif head_mask.ndim == 1:
        head_mask = (
            head_mask.expand_dims(0)
            .expand_dims(0)
            .expand_dims(-1)
            .expand_dims(-1)
        )
        head_mask = ops.broadcast_to(
            head_mask, (self.num_hidden_layers, -1, -1, -1, -1)
        )
    elif head_mask.ndim == 2:
        head_mask = head_mask.expand_dims(1).expand_dims(-1).expand_dims(-1)

    embedding_output = self.embeddings(
        input_ids, position_ids=position_ids, token_type_ids=token_type_ids
    )
    encoder_outputs = self.encoder(
        embedding_output,
        attention_mask,
        head_mask=head_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
    )
    sequence_output = encoder_outputs[0]
    pooled_output = None if self.pooler is None else self.pooler(sequence_output)

    # (sequence_output, pooled_output, hidden_states?, attentions?)
    return (sequence_output, pooled_output) + encoder_outputs[1:]

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertModel.get_input_embeddings()

This method returns the input embeddings of the MSBertModel.

PARAMETER DESCRIPTION
self

The instance of the MSBertModel class.

RETURNS DESCRIPTION
word_embeddings

The model's word-embedding layer (``self.embeddings.word_embeddings``), used as the input embeddings.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
def get_input_embeddings(self):
    """
    Return the model's input embeddings.

    Args:
        self: The MSBertModel instance.

    Returns:
        The word-embedding layer stored on ``self.embeddings``.

    Raises:
        None.
    """
    return self.embeddings.word_embeddings

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertModel.set_input_embeddings(new_embeddings)

Set the input embeddings for the MSBertModel.

PARAMETER DESCRIPTION
self

The MSBertModel instance.

TYPE: MSBertModel

new_embeddings

The new input embeddings to be set. This could be of any type, such as a tensor or an array.

TYPE: object

RETURNS DESCRIPTION

None.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
def set_input_embeddings(self, new_embeddings):
    """
    Install ``new_embeddings`` as the model's word embeddings.

    Args:
        self (MSBertModel): The model instance.
        new_embeddings (object): The replacement embedding layer.

    Returns:
        None.

    Raises:
        None.
    """
    self.embeddings.word_embeddings = new_embeddings

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertOutput

Bases: Module

Bert Output

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
class MSBertOutput(nn.Module):
    r"""
    Bert Output
    """
    def __init__(self, config):
        """
        Create the feed-forward output sub-block: a projection from
        intermediate_size back to hidden_size, dropout, and a residual
        LayerNorm.

        Args:
            self: The instance of the class.
            config: Configuration object providing ``intermediate_size``,
                ``hidden_size`` and ``hidden_dropout_prob``.

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__()
        self.dense = nn.Linear(
            config.intermediate_size,
            config.hidden_size,
        )
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.LayerNorm = nn.LayerNorm((config.hidden_size,), eps=1e-12)

    def forward(self, hidden_states, input_tensor):
        """
        Project the intermediate states back to the model width, apply
        dropout, then add the residual and layer-normalize.

        Args:
            self: The MSBertOutput instance.
            hidden_states (tensor): Intermediate feed-forward activations.
            input_tensor (tensor): Residual input added before normalization.

        Returns:
            tensor: The normalized output hidden states.

        Raises:
            None.
        """
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertOutput.__init__(config)

Initializes an instance of the MSBertOutput class.

PARAMETER DESCRIPTION
self

The instance of the class.

config

An object of type 'config' that contains the configuration parameters for the MSBertOutput.

RETURNS DESCRIPTION

None

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
def __init__(self, config):
    """
    Create the feed-forward output sub-block: a projection from
    intermediate_size back to hidden_size, dropout, and a residual LayerNorm.

    Args:
        self: The instance of the class.
        config: Configuration object providing ``intermediate_size``,
            ``hidden_size`` and ``hidden_dropout_prob``.

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__()
    self.dense = nn.Linear(
        config.intermediate_size,
        config.hidden_size,
    )
    self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
    self.LayerNorm = nn.LayerNorm((config.hidden_size,), eps=1e-12)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertOutput.forward(hidden_states, input_tensor)

This method forwards the output of the MSBert model.

PARAMETER DESCRIPTION
self

The instance of the MSBertOutput class.

hidden_states

The hidden states from the MSBert model. This tensor contains the encoded information from the input.

TYPE: tensor

input_tensor

The input tensor to be added to the hidden states. This tensor represents the original input to the MSBert model.

TYPE: tensor

RETURNS DESCRIPTION
tensor

The forwarded output tensor representing the final hidden states.

This tensor is the result of processing the hidden states and input tensor.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
def forward(self, hidden_states, input_tensor):
    """
    Project the intermediate states back to the model width, apply dropout,
    then add the residual and layer-normalize.

    Args:
        self: The MSBertOutput instance.
        hidden_states (tensor): Intermediate feed-forward activations.
        input_tensor (tensor): Residual input added before normalization.

    Returns:
        tensor: The normalized output hidden states.

    Raises:
        None.
    """
    projected = self.dropout(self.dense(hidden_states))
    return self.LayerNorm(projected + input_tensor)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPooler

Bases: Module

Bert Pooler

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
class MSBertPooler(nn.Module):
    r"""
    Bert Pooler
    """
    def __init__(self, config):
        """
        Create the pooler: a hidden_size -> hidden_size dense layer followed
        by a Tanh activation.

        Args:
            self (MSBertPooler): The pooler instance.
            config: Configuration object providing ``hidden_size``.

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__()
        self.dense = nn.Linear(
            config.hidden_size,
            config.hidden_size,
        )
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        """
        Pool the sequence by transforming the first token's hidden state.

        Args:
            self (MSBertPooler): The pooler instance.
            hidden_states: Hidden states of shape
                (batch_size, sequence_length, hidden_size).

        Returns:
            The pooled output: dense projection plus activation applied to the
            first-token hidden state.

        Raises:
            None.
        """
        # "Pooling" here is just the first token's hidden state pushed
        # through the dense layer and the activation.
        first_token_state = hidden_states[:, 0]
        return self.activation(self.dense(first_token_state))

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPooler.__init__(config)

Initializes an instance of the MSBertPooler class.

PARAMETER DESCRIPTION
self

The instance of the MSBertPooler class.

TYPE: MSBertPooler

config

An object containing configuration parameters.

  • Type: Any
  • Purpose: Holds the configuration settings for the MSBertPooler.
  • Restrictions: Must be compatible with the expected configuration format.

RETURNS DESCRIPTION

None.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
def __init__(self, config):
    """
    Create the pooler: a hidden_size -> hidden_size dense layer followed by a
    Tanh activation.

    Args:
        self (MSBertPooler): The pooler instance.
        config: Configuration object providing ``hidden_size``.

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__()
    self.dense = nn.Linear(
        config.hidden_size,
        config.hidden_size,
    )
    self.activation = nn.Tanh()

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPooler.forward(hidden_states)

This method forwards a pooled output from the given hidden states.

PARAMETER DESCRIPTION
self

The instance of the MSBertPooler class.

TYPE: MSBertPooler

hidden_states

A tensor containing the hidden states. It is expected to have a shape of (batch_size, sequence_length, hidden_size).

TYPE: Tensor

RETURNS DESCRIPTION

Tensor: The pooled output tensor obtained by applying dense and activation functions to the first token tensor from the hidden_states.

RAISES DESCRIPTION
TypeError

If the input hidden_states is not a torch.Tensor.

ValueError

If the hidden_states tensor does not have the expected shape of (batch_size, sequence_length, hidden_size).

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
def forward(self, hidden_states):
    """
    Pool the sequence by transforming the first token's hidden state.

    Args:
        self (MSBertPooler): The pooler instance.
        hidden_states: Hidden states of shape
            (batch_size, sequence_length, hidden_size).

    Returns:
        The pooled output: dense projection plus activation applied to the
        first-token hidden state.

    Raises:
        None.
    """
    # "Pooling" here is just the first token's hidden state pushed through
    # the dense layer and the activation.
    first_token_state = hidden_states[:, 0]
    return self.activation(self.dense(first_token_state))

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPreTrainedModel

Bases: PreTrainedModel

BertPretrainedModel

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
class MSBertPreTrainedModel(PreTrainedModel):
    """BertPretrainedModel"""
    config_class = BertConfig
    base_model_prefix = "bert"
    supports_recompute = True

    def _init_weights(self, cell):
        """Initialize the weights of one cell (layer)."""
        if isinstance(cell, nn.Linear):
            # Normal init, slightly different from the TF version which uses
            # truncated_normal — cf https://github.com/pytorch/pytorch/pull/5617
            dense_init = initializer(
                Normal(self.config.initializer_range),
                cell.weight.shape,
                cell.weight.dtype,
            )
            cell.weight.set_data(dense_init)
            if cell.bias is not None:
                cell.bias.set_data(
                    initializer("zeros", cell.bias.shape, cell.bias.dtype)
                )
        elif isinstance(cell, nn.Embedding):
            embed_init = initializer(
                Normal(self.config.initializer_range),
                cell.weight.shape,
                cell.weight.dtype,
            )
            # Keep the padding token's embedding vector at zero.
            if cell.padding_idx is not None:
                embed_init[cell.padding_idx] = 0
            cell.weight.set_data(embed_init)
        elif isinstance(cell, nn.LayerNorm):
            # Identity-like start: unit scale, zero shift.
            cell.weight.set_data(initializer("ones", cell.weight.shape, cell.weight.dtype))
            cell.bias.set_data(initializer("zeros", cell.bias.shape, cell.bias.dtype))

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPreTrainingHeads

Bases: Module

Bert PreTraining Heads

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
class MSBertPreTrainingHeads(nn.Module):
    r"""
    Bert PreTraining Heads
    """
    def __init__(self, config):
        """
        Create the two pre-training heads: the masked-LM prediction head and
        the binary sequence-relationship classifier.

        Args:
            self (object): The instance of the class.
            config (object): Configuration object; must be compatible with
                MSBertLMPredictionHead and provide ``hidden_size``.

        Returns:
            None.

        Raises:
            None.
        """
        super().__init__()
        self.predictions = MSBertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output, masked_lm_positions):
        """
        Compute both pre-training scores.

        Args:
            self (object): The instance of the class.
            sequence_output (tensor): Per-token encoder output.
            pooled_output (tensor): Pooled (first-token) representation.
            masked_lm_positions (tensor): Positions of masked tokens.

        Returns:
            Tuple: ``(prediction_scores, seq_relationship_score)``.

        Raises:
            None.
        """
        mlm_scores = self.predictions(sequence_output, masked_lm_positions)
        relationship_scores = self.seq_relationship(pooled_output)
        return mlm_scores, relationship_scores

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPreTrainingHeads.__init__(config)

Initialize the MSBertPreTrainingHeads class.

PARAMETER DESCRIPTION
self

The instance of the class.

TYPE: object

config

An object containing configuration settings.

  • Type: Custom class
  • Purpose: Provides configuration parameters for the pre-training heads.
  • Restrictions: Must be compatible with the MSBertLMPredictionHead and nn.Linear classes.

TYPE: object

RETURNS DESCRIPTION

None.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
def __init__(self, config):
    """
    Set up the two BERT pre-training heads.

    Args:
        self: The MSBertPreTrainingHeads instance being initialized.
        config (object): Model configuration; must expose ``hidden_size`` and
            the attributes required by MSBertLMPredictionHead.

    Returns:
        None.

    Raises:
        None.
    """
    super().__init__()
    # A binary next-sentence classifier and a masked-LM head over the vocabulary.
    self.seq_relationship = nn.Linear(config.hidden_size, 2)
    self.predictions = MSBertLMPredictionHead(config)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPreTrainingHeads.forward(sequence_output, pooled_output, masked_lm_positions)

Construct method in the MSBertPreTrainingHeads class.

PARAMETER DESCRIPTION
self

The instance of the class.

TYPE: object

sequence_output

The output tensor from the pre-trained model for the input sequence.

TYPE: tensor

pooled_output

The output tensor obtained by applying pooling to the sequence_output.

TYPE: tensor

masked_lm_positions

The positions of the masked language model tokens in the input sequence.

TYPE: tensor

RETURNS DESCRIPTION
Tuple

A tuple containing the prediction_scores (tensor) and seq_relationship_score (tensor) calculated based on the inputs.

RAISES DESCRIPTION
None

This method does not raise any exceptions.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
def forward(self, sequence_output, pooled_output, masked_lm_positions):
    """
    Compute the two BERT pre-training head outputs.

    Args:
        self: The MSBertPreTrainingHeads instance.
        sequence_output (tensor): Encoder output for every token position.
        pooled_output (tensor): Pooled [CLS] representation of the sequence.
        masked_lm_positions (tensor): Indices of the masked tokens whose
            vocabulary scores should be predicted.

    Returns:
        tuple: ``(prediction_scores, seq_relationship_score)`` — masked-LM
            vocabulary scores and next-sentence-prediction logits.

    Raises:
        None.
    """
    # The two heads are independent, so evaluation order does not matter.
    seq_relationship_score = self.seq_relationship(pooled_output)
    prediction_scores = self.predictions(sequence_output, masked_lm_positions)
    return prediction_scores, seq_relationship_score

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPredictionHeadTransform

Bases: Module

Bert Prediction Head Transform

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
class MSBertPredictionHeadTransform(nn.Module):
    r"""
    Transformation applied before the BERT LM prediction head: a dense
    projection, the configured activation function, then layer normalization.
    """
    def __init__(self, config):
        """
        Build the transform sub-layers from *config*.

        Args:
            self: The MSBertPredictionHeadTransform instance being initialized.
            config: Configuration object providing ``hidden_size`` (int),
                ``hidden_act`` (str, a key of the ACT2FN mapping) and
                ``layer_norm_eps`` (float, epsilon used by LayerNorm).

        Returns:
            None.

        Raises:
            KeyError: If ``config.hidden_act`` is not a key of ACT2FN.
            AttributeError: If ``config`` is missing a required attribute.
        """
        super().__init__()
        hidden = config.hidden_size
        self.dense = nn.Linear(hidden, hidden)
        self.transform_act_fn = ACT2FN[config.hidden_act]
        self.LayerNorm = nn.LayerNorm((hidden,), eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        """
        Apply the dense projection, activation and LayerNorm in sequence.

        Args:
            self: The MSBertPredictionHeadTransform instance.
            hidden_states: Input tensor of hidden states; its last dimension
                must equal ``config.hidden_size``.

        Returns:
            Tensor: The transformed hidden states, same shape as the input.

        Raises:
            None.
        """
        projected = self.dense(hidden_states)
        activated = self.transform_act_fn(projected)
        return self.LayerNorm(activated)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPredictionHeadTransform.__init__(config)

Initializes an instance of the MSBertPredictionHeadTransform class.

PARAMETER DESCRIPTION
self

An instance of the MSBertPredictionHeadTransform class.

config

An object containing configuration settings for the transformation. It is expected to have the following attributes:

  • hidden_size (int): The size of the hidden layer.
  • hidden_act (str): The activation function to be used for the hidden layer.
  • layer_norm_eps (float): The epsilon value for LayerNorm.

RETURNS DESCRIPTION
None

This method initializes the dense layer, activation function, and LayerNorm parameters for the transformation.

RAISES DESCRIPTION
TypeError

If the config parameter is not provided.

ValueError

If the config parameter is missing any required attributes.

KeyError

If the hidden activation function specified in the config is not found in the ACT2FN dictionary.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
def __init__(self, config):
    """
    Build the prediction-head transform sub-layers from *config*.

    Args:
        self: The MSBertPredictionHeadTransform instance being initialized.
        config: Configuration object providing ``hidden_size`` (int),
            ``hidden_act`` (str, a key of the ACT2FN mapping) and
            ``layer_norm_eps`` (float, epsilon used by LayerNorm).

    Returns:
        None.

    Raises:
        KeyError: If ``config.hidden_act`` is not a key of ACT2FN.
        AttributeError: If ``config`` is missing a required attribute.
    """
    super().__init__()
    hidden = config.hidden_size
    self.dense = nn.Linear(hidden, hidden)
    self.transform_act_fn = ACT2FN[config.hidden_act]
    self.LayerNorm = nn.LayerNorm((hidden,), eps=config.layer_norm_eps)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertPredictionHeadTransform.forward(hidden_states)

This method 'forward' is part of the 'MSBertPredictionHeadTransform' class and is used to perform transformations on hidden states.

PARAMETER DESCRIPTION
self

The instance of the 'MSBertPredictionHeadTransform' class.

  • Type: MSBertPredictionHeadTransform
  • Purpose: Represents the current instance of the class.
  • Restrictions: None

hidden_states

The input hidden states that need to undergo transformations.

  • Type: Any
  • Purpose: Represents the hidden states to be processed.
  • Restrictions: Should be compatible with the operations performed within the method.

RETURNS DESCRIPTION
hidden_states
  • Type: Tensor (same shape as the input hidden states)
  • Purpose: To return the processed hidden states for further usage.
Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
def forward(self, hidden_states):
    """
    Apply the dense projection, activation and LayerNorm in sequence.

    Args:
        self: The MSBertPredictionHeadTransform instance.
        hidden_states: Input tensor of hidden states; its last dimension
            must equal ``config.hidden_size``.

    Returns:
        Tensor: The transformed hidden states, same shape as the input.

    Raises:
        None.
    """
    projected = self.dense(hidden_states)
    activated = self.transform_act_fn(projected)
    return self.LayerNorm(activated)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertSelfAttention

Bases: Module

Self attention layer for BERT.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
class MSBertSelfAttention(nn.Module):
    """
    Self attention layer for BERT.
    """
    def __init__(self, config, causal, init_cache=False):
        """Initializes an instance of the MSBertSelfAttention class.

        Args:
            self: The instance of the class.
            config:
                A configuration object containing various parameters.

                - Type: Object
                - Purpose: Specifies the configuration parameters for the attention mechanism.
                - Restrictions: None

            causal:
                A boolean value indicating whether the attention mechanism is causal or not.

                - Type: bool
                - Purpose: Determines if the attention mechanism is restricted to attend to previous positions only.
                - Restrictions: None

            init_cache:
                A boolean value indicating whether to initialize the cache or not.

                - Type: bool
                - Purpose: Determines if the cache for attention weights and values should be initialized.
                - Restrictions: None

        Returns:
            None.

        Raises:
            ValueError: If the hidden size is not a multiple of the number of attention heads.

        Notes:
            - This method is called when creating an instance of the MSBertSelfAttention class.
            - The attention mechanism is responsible for computing self-attention weights and values based on the input.
            - The method initializes various instance variables and parameters required for the attention mechanism.
            - If the hidden size is not divisible by the number of attention heads, a ValueError is raised.
            - The method also initializes the cache variables if `init_cache` is True, otherwise sets them to None.
            - The method creates dense layers for query, key, and value projections.
            - The method initializes dropout and softmax layers for attention probabilities computation.
            - The method creates a causal mask if `causal` is True, otherwise uses a mask of ones.
        """
        super().__init__()
        # Each head must receive an equal slice of the hidden dimension.
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}"
            )
        self.output_attentions = config.output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Projections for Q, K, V; all map hidden_size -> all_head_size.
        self.query = nn.Linear(
            config.hidden_size,
            self.all_head_size,
        )
        self.key = nn.Linear(
            config.hidden_size,
            self.all_head_size,
        )
        self.value = nn.Linear(
            config.hidden_size,
            self.all_head_size,
        )

        self.dropout = nn.Dropout(p=config.attention_probs_dropout_prob)
        self.softmax = nn.Softmax(-1)

        self.causal = causal
        self.init_cache = init_cache

        # Boolean lower-triangular mask covering the maximum sequence length;
        # sliced per-call in forward().
        self.causal_mask = F.make_causal_mask(
            ops.ones((1, config.max_position_embeddings), dtype=mstype.bool_),
            dtype=mstype.bool_,
        )

        if not init_cache:
            self.cache_key = None
            self.cache_value = None
            self.cache_index = None
        else:
            # NOTE(review): this reads config.max_length, config.max_batch_size and
            # config.attention_head_size — none of which are default BertConfig
            # attributes (the head size computed above is self.attention_head_size).
            # Presumably a decoding-specific config supplies them; confirm.
            self.cache_key = Parameter(
                initializer(
                    "zeros",
                    (
                        config.max_length,
                        config.max_batch_size,
                        config.num_attention_heads,
                        config.attention_head_size,
                    ),
                )
            )
            self.cache_value = Parameter(
                initializer(
                    "zeros",
                    (
                        config.max_length,
                        config.max_batch_size,
                        config.num_attention_heads,
                        config.attention_head_size,
                    ),
                )
            )
            self.cache_index = Parameter(Tensor(0, mstype.int32))

    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        Concatenates the given key, value, query, and attention mask to the cache in the MSBertSelfAttention class.

        Args:
            self (MSBertSelfAttention): An instance of the MSBertSelfAttention class.
            key (Tensor): The key tensor to be concatenated to the cache.
                Shape: (batch_size, num_updated_cache_vectors, hidden_size).
            value (Tensor): The value tensor to be concatenated to the cache.
                Shape: (batch_size, num_updated_cache_vectors, hidden_size).
            query (Tensor): The query tensor. Shape: (batch_size, sequence_length, hidden_size).
            attention_mask (Tensor): The attention mask tensor. Shape: (batch_size, sequence_length).

        Returns:
            tuple: A tuple containing the updated key, value, and attention mask tensors.

        Raises:
            None.
        """
        if self.init_cache:
            batch_size = query.shape[0]
            num_updated_cache_vectors = query.shape[1]
            max_length = self.cache_key.shape[0]
            # Write the new K/V rows into the cache slots starting at cache_index.
            indices = ops.arange(
                self.cache_index, self.cache_index + num_updated_cache_vectors
            )
            key = ops.scatter_update(self.cache_key, indices, key.swapaxes(0, 1))
            value = ops.scatter_update(self.cache_value, indices, value.swapaxes(0, 1))

            # Side effect: advances the persistent cache cursor.
            self.cache_index += num_updated_cache_vectors

            # Mask out cache slots that have not been filled yet.
            pad_mask = ops.broadcast_to(
                ops.arange(max_length) < self.cache_index,
                (batch_size, 1, num_updated_cache_vectors, max_length),
            )
            attention_mask = ops.logical_and(attention_mask, pad_mask)

        return key, value, attention_mask

    def transpose_for_scores(self, input_x):
        r"""
        Reshape (..., all_head_size) into (batch, heads, seq, head_size) for attention.
        """
        new_x_shape = input_x.shape[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        input_x = input_x.view(*new_x_shape)
        return input_x.transpose(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        """
        Constructs the self-attention layer for the MSBert model.

        Args:
            self (MSBertSelfAttention): The instance of the MSBertSelfAttention class.
            hidden_states (Tensor):
                The input tensor of shape (batch_size, seq_length, hidden_size) representing the hidden states.
            attention_mask (Tensor, optional):
                The attention mask tensor of shape (batch_size, seq_length) or (batch_size, seq_length, seq_length)
                to mask out certain positions from the attention computation.
                Defaults to None.
            head_mask (Tensor, optional):
                The tensor of shape (num_attention_heads,) representing the mask for the attention heads.
                Defaults to None.

        Returns:
            outputs (tuple): A tuple containing the context layer tensor of shape (batch_size, seq_length, hidden_size)
                and the attention probabilities tensor of shape (batch_size, num_attention_heads, seq_length, seq_length)
                if self.output_attentions is True, else only the context layer tensor is returned.

        Raises:
            None.
        """
        batch_size = hidden_states.shape[0]

        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_states = self.transpose_for_scores(mixed_query_layer)
        key_states = self.transpose_for_scores(mixed_key_layer)
        value_states = self.transpose_for_scores(mixed_value_layer)

        if self.causal:
            # NOTE(review): transpose_for_scores returns (batch, heads, seq, head_size),
            # so shape[1] here is the head count, not a sequence length — confirm intent.
            query_length, key_length = query_states.shape[1], key_states.shape[1]
            if self.has_variable("cache", "cached_key"):
                mask_shift = self.variables["cache"]["cache_index"]
                max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
                causal_mask = ops.slice(
                    self.causal_mask,
                    (0, 0, mask_shift, 0),
                    (1, 1, query_length, max_decoder_length),
                )
            else:
                causal_mask = self.causal_mask[:, :, :query_length, :key_length]
            causal_mask = ops.broadcast_to(
                causal_mask, (batch_size,) + causal_mask.shape[1:]
            )
        else:
            causal_mask = None

        # Merge the padding mask with the causal mask when both are present.
        if attention_mask is not None and self.causal:
            attention_mask = ops.broadcast_to(
                attention_mask.expand_dims(-2).expand_dims(-3), causal_mask.shape
            )
            attention_mask = ops.logical_and(attention_mask, causal_mask)
        elif self.causal:
            attention_mask = causal_mask
        elif attention_mask is not None:
            attention_mask = attention_mask.expand_dims(-2).expand_dims(-3)

        if self.causal and self.init_cache:
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )

        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias

            attention_bias = ops.select(
                attention_mask > 0,
                ops.zeros_like(attention_mask).astype(hidden_states.dtype),
                (ops.ones_like(attention_mask) * float(ops.finfo(hidden_states.dtype).min)).astype(
                    hidden_states.dtype
                ),
            )
        else:
            attention_bias = None

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = ops.matmul(query_states, key_states.swapaxes(-1, -2))
        attention_scores = attention_scores / ops.sqrt(
            Tensor(self.attention_head_size, mstype.float32)
        )
        # Apply the attention mask (precomputed for all layers in BertModel forward() function).
        # NOTE(review): when attention_mask is None (non-causal path), attention_bias is
        # None and this addition would fail — presumably callers always pass a mask; confirm.
        attention_scores = attention_scores + attention_bias

        # Normalize the attention scores to probabilities.
        attention_probs = self.softmax(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        # Weighted sum of values, then merge the heads back into hidden_size.
        context_layer = ops.matmul(attention_probs, value_states)
        context_layer = context_layer.transpose(0, 2, 1, 3)
        new_context_layer_shape = context_layer.shape[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (
            (context_layer, attention_probs)
            if self.output_attentions
            else (context_layer,)
        )
        return outputs

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertSelfAttention.__init__(config, causal, init_cache=False)

Initializes an instance of the MSBertSelfAttention class.

PARAMETER DESCRIPTION
self

The instance of the class.

config

A configuration object containing various parameters.

  • Type: Object
  • Purpose: Specifies the configuration parameters for the attention mechanism.
  • Restrictions: None

causal

A boolean value indicating whether the attention mechanism is causal or not.

  • Type: bool
  • Purpose: Determines if the attention mechanism is restricted to attend to previous positions only.
  • Restrictions: None

init_cache

A boolean value indicating whether to initialize the cache or not.

  • Type: bool
  • Purpose: Determines if the cache for attention weights and values should be initialized.
  • Restrictions: None

DEFAULT: False

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
ValueError

If the hidden size is not a multiple of the number of attention heads.

Notes
  • This method is called when creating an instance of the MSBertSelfAttention class.
  • The attention mechanism is responsible for computing self-attention weights and values based on the input.
  • The method initializes various instance variables and parameters required for the attention mechanism.
  • If the hidden size is not divisible by the number of attention heads, a ValueError is raised.
  • The method also initializes the cache variables if init_cache is True, otherwise sets them to None.
  • The method creates dense layers for query, key, and value projections.
  • The method initializes dropout and softmax layers for attention probabilities computation.
  • The method creates a causal mask if causal is True, otherwise uses a mask of ones.
Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
def __init__(self, config, causal, init_cache=False):
    """Initializes an instance of the MSBertSelfAttention class.

    Args:
        self: The instance of the class.
        config:
            A configuration object containing various parameters.

            - Type: Object
            - Purpose: Specifies the configuration parameters for the attention mechanism.
            - Restrictions: None

        causal:
            A boolean value indicating whether the attention mechanism is causal or not.

            - Type: bool
            - Purpose: Determines if the attention mechanism is restricted to attend to previous positions only.
            - Restrictions: None

        init_cache:
            A boolean value indicating whether to initialize the cache or not.

            - Type: bool
            - Purpose: Determines if the cache for attention weights and values should be initialized.
            - Restrictions: None

    Returns:
        None.

    Raises:
        ValueError: If the hidden size is not a multiple of the number of attention heads.

    Notes:
        - This method is called when creating an instance of the MSBertSelfAttention class.
        - The attention mechanism is responsible for computing self-attention weights and values based on the input.
        - The method initializes various instance variables and parameters required for the attention mechanism.
        - If the hidden size is not divisible by the number of attention heads, a ValueError is raised.
        - The method also initializes the cache variables if `init_cache` is True, otherwise sets them to None.
        - The method creates dense layers for query, key, and value projections.
        - The method initializes dropout and softmax layers for attention probabilities computation.
        - The method creates a causal mask if `causal` is True, otherwise uses a mask of ones.
    """
    super().__init__()
    # Each head must receive an equal slice of the hidden dimension.
    if config.hidden_size % config.num_attention_heads != 0:
        raise ValueError(
            f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
            f"heads {config.num_attention_heads}"
        )
    self.output_attentions = config.output_attentions

    self.num_attention_heads = config.num_attention_heads
    self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
    self.all_head_size = self.num_attention_heads * self.attention_head_size

    # Projections for Q, K, V; all map hidden_size -> all_head_size.
    self.query = nn.Linear(
        config.hidden_size,
        self.all_head_size,
    )
    self.key = nn.Linear(
        config.hidden_size,
        self.all_head_size,
    )
    self.value = nn.Linear(
        config.hidden_size,
        self.all_head_size,
    )

    self.dropout = nn.Dropout(p=config.attention_probs_dropout_prob)
    self.softmax = nn.Softmax(-1)

    self.causal = causal
    self.init_cache = init_cache

    # Boolean lower-triangular mask covering the maximum sequence length;
    # sliced per-call in forward().
    self.causal_mask = F.make_causal_mask(
        ops.ones((1, config.max_position_embeddings), dtype=mstype.bool_),
        dtype=mstype.bool_,
    )

    if not init_cache:
        self.cache_key = None
        self.cache_value = None
        self.cache_index = None
    else:
        # NOTE(review): this reads config.max_length, config.max_batch_size and
        # config.attention_head_size — none of which are default BertConfig
        # attributes (the head size computed above is self.attention_head_size).
        # Presumably a decoding-specific config supplies them; confirm.
        self.cache_key = Parameter(
            initializer(
                "zeros",
                (
                    config.max_length,
                    config.max_batch_size,
                    config.num_attention_heads,
                    config.attention_head_size,
                ),
            )
        )
        self.cache_value = Parameter(
            initializer(
                "zeros",
                (
                    config.max_length,
                    config.max_batch_size,
                    config.num_attention_heads,
                    config.attention_head_size,
                ),
            )
        )
        self.cache_index = Parameter(Tensor(0, mstype.int32))

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertSelfAttention.forward(hidden_states, attention_mask=None, head_mask=None)

Constructs the self-attention layer for the MSBert model.

PARAMETER DESCRIPTION
self

The instance of the MSBertSelfAttention class.

TYPE: MSBertSelfAttention

hidden_states

The input tensor of shape (batch_size, seq_length, hidden_size) representing the hidden states.

TYPE: Tensor

attention_mask

The attention mask tensor of shape (batch_size, seq_length) or (batch_size, seq_length, seq_length) to mask out certain positions from the attention computation. Defaults to None.

TYPE: Tensor DEFAULT: None

head_mask

The tensor of shape (num_attention_heads,) representing the mask for the attention heads. Defaults to None.

TYPE: Tensor DEFAULT: None

RETURNS DESCRIPTION
outputs

A tuple containing the context layer tensor of shape (batch_size, seq_length, hidden_size) and the attention probabilities tensor of shape (batch_size, num_attention_heads, seq_length, seq_length) if self.output_attentions is True, else only the context layer tensor is returned.

TYPE: tuple

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
def forward(self, hidden_states, attention_mask=None, head_mask=None):
    """
    Constructs the self-attention layer for the MSBert model.

    Args:
        self (MSBertSelfAttention): The instance of the MSBertSelfAttention class.
        hidden_states (Tensor):
            The input tensor of shape (batch_size, seq_length, hidden_size) representing the hidden states.
        attention_mask (Tensor, optional):
            The attention mask tensor of shape (batch_size, seq_length) or (batch_size, seq_length, seq_length)
            to mask out certain positions from the attention computation.
            Defaults to None.
        head_mask (Tensor, optional):
            The tensor of shape (num_attention_heads,) representing the mask for the attention heads.
            Defaults to None.

    Returns:
        outputs (tuple): A tuple containing the context layer tensor of shape (batch_size, seq_length, hidden_size)
            and the attention probabilities tensor of shape (batch_size, num_attention_heads, seq_length, seq_length)
            if self.output_attentions is True, else only the context layer tensor is returned.

    Raises:
        None.
    """
    batch_size = hidden_states.shape[0]

    # Project into query/key/value and reshape each to
    # (batch, num_heads, seq_len, head_size).
    mixed_query_layer = self.query(hidden_states)
    mixed_key_layer = self.key(hidden_states)
    mixed_value_layer = self.value(hidden_states)
    query_states = self.transpose_for_scores(mixed_query_layer)
    key_states = self.transpose_for_scores(mixed_key_layer)
    value_states = self.transpose_for_scores(mixed_value_layer)

    if self.causal:
        query_length, key_length = query_states.shape[1], key_states.shape[1]
        if self.has_variable("cache", "cached_key"):
            # During incremental decoding the causal mask window is shifted by
            # the number of tokens already cached.
            mask_shift = self.variables["cache"]["cache_index"]
            max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
            causal_mask = ops.slice(
                self.causal_mask,
                (0, 0, mask_shift, 0),
                (1, 1, query_length, max_decoder_length),
            )
        else:
            causal_mask = self.causal_mask[:, :, :query_length, :key_length]
        causal_mask = ops.broadcast_to(
            causal_mask, (batch_size,) + causal_mask.shape[1:]
        )
    else:
        causal_mask = None

    # Merge the user-supplied padding mask with the causal mask when both exist.
    if attention_mask is not None and self.causal:
        attention_mask = ops.broadcast_to(
            attention_mask.expand_dims(-2).expand_dims(-3), causal_mask.shape
        )
        attention_mask = ops.logical_and(attention_mask, causal_mask)
    elif self.causal:
        attention_mask = causal_mask
    elif attention_mask is not None:
        attention_mask = attention_mask.expand_dims(-2).expand_dims(-3)

    if self.causal and self.init_cache:
        key_states, value_states, attention_mask = self._concatenate_to_cache(
            key_states, value_states, query_states, attention_mask
        )

    # Convert the boolean attention mask to an attention bias.
    if attention_mask is not None:
        # attention mask in the form of attention bias: 0 where attending is
        # allowed, a large negative value where it is masked out.
        attention_bias = ops.select(
            attention_mask > 0,
            ops.zeros_like(attention_mask).astype(hidden_states.dtype),
            (ops.ones_like(attention_mask) * float(ops.finfo(hidden_states.dtype).min)).astype(
                hidden_states.dtype
            ),
        )
    else:
        attention_bias = None

    # Take the dot product between "query" and "key" to get the raw attention scores.
    attention_scores = ops.matmul(query_states, key_states.swapaxes(-1, -2))
    attention_scores = attention_scores / ops.sqrt(
        Tensor(self.attention_head_size, mstype.float32)
    )
    # Apply the attention mask (precomputed for all layers in BertModel forward() function).
    # Guard against attention_bias being None: with no attention mask and a
    # non-causal layer, the unconditional addition would raise a TypeError.
    if attention_bias is not None:
        attention_scores = attention_scores + attention_bias

    # Normalize the attention scores to probabilities.
    attention_probs = self.softmax(attention_scores)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attention_probs = self.dropout(attention_probs)

    if head_mask is not None:
        attention_probs = attention_probs * head_mask

    # Weighted sum over values, then fold the heads back into one hidden axis.
    context_layer = ops.matmul(attention_probs, value_states)
    context_layer = context_layer.transpose(0, 2, 1, 3)
    new_context_layer_shape = context_layer.shape[:-2] + (self.all_head_size,)
    context_layer = context_layer.view(*new_context_layer_shape)

    outputs = (
        (context_layer, attention_probs)
        if self.output_attentions
        else (context_layer,)
    )
    return outputs

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertSelfAttention.transpose_for_scores(input_x)

transpose for scores

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
256
257
258
259
260
261
262
263
264
265
def transpose_for_scores(self, input_x):
    r"""
    Split the last axis of *input_x* into (num_heads, head_size) and move the
    head axis forward, producing a (batch, num_heads, seq_len, head_size) view.
    """
    split_shape = input_x.shape[:-1] + (
        self.num_attention_heads,
        self.attention_head_size,
    )
    per_head = input_x.view(*split_shape)
    return per_head.transpose(0, 2, 1, 3)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertSelfOutput

Bases: Module

Bert Self Output

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
class MSBertSelfOutput(nn.Module):
    r"""
    Output projection for the MSBert self-attention block: a dense layer
    followed by dropout, a residual addition and LayerNorm.
    """
    def __init__(self, config):
        """
        Build the dense projection, LayerNorm and dropout sub-modules.

        Args:
            self: The instance being initialized.
            config: Configuration object providing `hidden_size` and
                `hidden_dropout_prob`.

        Returns:
            None.

        Raises:
            TypeError: If the config parameter is not of the expected type.
            ValueError: If the config parameter does not contain the required configuration parameters.
            RuntimeError: If there is an issue with initializing the dense, LayerNorm, or dropout attributes.
        """
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm((config.hidden_size,), eps=1e-12)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """
        Project the attention output and merge it with the residual input.

        Args:
            self: The instance of the class.
            hidden_states (tensor): Attention output to be projected.
            input_tensor (tensor): Residual input added before normalization.

        Returns:
            tensor: LayerNorm(dropout(dense(hidden_states)) + input_tensor).

        Raises:
            This method does not raise any exceptions.
        """
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertSelfOutput.__init__(config)

Initializes an instance of the MSBertSelfOutput class.

PARAMETER DESCRIPTION
self

The instance of the MSBertSelfOutput class.

config

An object containing configuration parameters for the MSBertSelfOutput class.

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
TypeError

If the config parameter is not of the expected type.

ValueError

If the config parameter does not contain the required configuration parameters.

RuntimeError

If there is an issue with initializing the dense, LayerNorm, or dropout attributes.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
def __init__(self, config):
    """
    Build the sub-modules of the MSBertSelfOutput layer.

    Args:
        self: The instance being initialized.
        config: Configuration object providing `hidden_size` and
            `hidden_dropout_prob`.

    Returns:
        None.

    Raises:
        TypeError: If the config parameter is not of the expected type.
        ValueError: If the config parameter does not contain the required configuration parameters.
        RuntimeError: If there is an issue with initializing the dense, LayerNorm, or dropout attributes.
    """
    super().__init__()
    self.dense = nn.Linear(config.hidden_size, config.hidden_size)
    self.LayerNorm = nn.LayerNorm((config.hidden_size,), eps=1e-12)
    self.dropout = nn.Dropout(p=config.hidden_dropout_prob)

mindnlp.transformers.models.bert.modeling_graph_bert.MSBertSelfOutput.forward(hidden_states, input_tensor)

This method 'forward' is a part of the 'MSBertSelfOutput' class and is responsible for processing the hidden states and input tensor.

PARAMETER DESCRIPTION
self

The instance of the class.

hidden_states

The hidden states to be processed. It is expected to be a tensor.

TYPE: tensor

input_tensor

The input tensor to be incorporated into the hidden states. It is expected to be a tensor.

TYPE: tensor

RETURNS DESCRIPTION
tensor

The processed hidden states with the input tensor incorporated.

Source code in mindnlp\transformers\models\bert\modeling_graph_bert.py
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
def forward(self, hidden_states, input_tensor):
    """
    Project the attention output and fold in the residual connection.

    Args:
        self: The instance of the class.
        hidden_states (tensor): Attention output to be projected.
        input_tensor (tensor): Residual input combined before normalization.

    Returns:
        tensor: The normalized sum of the projected (and dropped-out) hidden
            states and the residual input.

    Raises:
        This method does not raise any exceptions.
    """
    projected = self.dense(hidden_states)
    projected = self.dropout(projected)
    return self.LayerNorm(projected + input_tensor)

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer

Bases: PreTrainedTokenizer

Construct a BERT tokenizer. Based on WordPiece.

This tokenizer inherits from [PreTrainedTokenizer] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.

PARAMETER DESCRIPTION
vocab_file

File containing the vocabulary.

TYPE: `str`

do_lower_case

Whether or not to lowercase the input when tokenizing.

TYPE: `bool`, *optional*, defaults to `True` DEFAULT: True

do_basic_tokenize

Whether or not to do basic tokenization before WordPiece.

TYPE: `bool`, *optional*, defaults to `True` DEFAULT: True

never_split

Collection of tokens which will never be split during tokenization. Only has an effect when do_basic_tokenize=True

TYPE: `Iterable`, *optional* DEFAULT: None

unk_token

The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.

TYPE: `str`, *optional*, defaults to `"[UNK]"` DEFAULT: '[UNK]'

sep_token

The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.

TYPE: `str`, *optional*, defaults to `"[SEP]"` DEFAULT: '[SEP]'

pad_token

The token used for padding, for example when batching sequences of different lengths.

TYPE: `str`, *optional*, defaults to `"[PAD]"` DEFAULT: '[PAD]'

cls_token

The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.

TYPE: `str`, *optional*, defaults to `"[CLS]"` DEFAULT: '[CLS]'

mask_token

The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.

TYPE: `str`, *optional*, defaults to `"[MASK]"` DEFAULT: '[MASK]'

tokenize_chinese_chars

Whether or not to tokenize Chinese characters.

This should likely be deactivated for Japanese (see this issue).

TYPE: `bool`, *optional*, defaults to `True` DEFAULT: True

strip_accents

Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for lowercase (as in the original BERT).

TYPE: `bool`, *optional* DEFAULT: None

Source code in mindnlp\transformers\models\bert\tokenization_bert.py
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
class BertTokenizer(PreTrainedTokenizer):
    r"""
    Construct a BERT tokenizer. Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """
        This method initializes a BertTokenizer object.

        Args:
            self: The instance of the class.
            vocab_file (str): The path to the vocabulary file.
            do_lower_case (bool, optional): Whether to convert tokens to lowercase. Default is True.
            do_basic_tokenize (bool, optional): Whether to perform basic tokenization. Default is True.
            never_split (list, optional): List of tokens that should not be split further.
            unk_token (str, optional): The unknown token representation. Default is '[UNK]'.
            sep_token (str, optional): The separator token. Default is '[SEP]'.
            pad_token (str, optional): The padding token. Default is '[PAD]'.
            cls_token (str, optional): The classification token. Default is '[CLS]'.
            mask_token (str, optional): The masking token. Default is '[MASK]'.
            tokenize_chinese_chars (bool, optional): Whether to tokenize Chinese characters. Default is True.
            strip_accents (str, optional): Method to strip accents. None by default.

        Returns:
            None.

        Raises:
            ValueError: If the vocab_file path is invalid or the file does not exist.
            Exception: Any unexpected errors that may occur during the initialization process.
        """
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping (id -> token) used by _convert_id_to_token.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        # NOTE(review): basic_tokenizer only exists when do_basic_tokenize is
        # True; the do_lower_case property depends on it unconditionally.
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

        # NOTE(review): the vocab and sub-tokenizers are set up before calling
        # super().__init__, presumably because the base class tokenizes special
        # tokens during its own initialization — confirm against the base class.
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

    @property
    def do_lower_case(self):
        """
        Whether the underlying basic tokenizer lowercases input.

        Args:
            self: The instance of the BertTokenizer class.

        Returns:
            bool: The `do_lower_case` setting of the `basic_tokenizer` attribute.

        Raises:
            This method does not raise any exceptions.
        """
        # NOTE(review): raises AttributeError when the tokenizer was built with
        # do_basic_tokenize=False (basic_tokenizer is never created) — confirm intended.
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        """
        Method to retrieve the size of the vocabulary used by the BertTokenizer.

        Args:
            self (BertTokenizer): The instance of the BertTokenizer class.
                This parameter is used to access the vocabulary stored within the BertTokenizer instance.

        Returns:
            int: The number of unique tokens in the vocabulary of the BertTokenizer.
                This value represents the size of the vocabulary used by the tokenizer.
                Added tokens are not included; see `get_vocab` for the full mapping.

        Raises:
            None.
        """
        return len(self.vocab)

    def get_vocab(self):
        """
        Retrieve the vocabulary of the BertTokenizer including any added tokens.

        Args:
            self (BertTokenizer): An instance of the BertTokenizer class.
                It represents the tokenizer object.

        Returns:
            dict: A dictionary containing the vocabulary of the BertTokenizer, including any added tokens.

        Raises:
            None.
        """
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text, split_special_tokens=False):
        """
        This method _tokenize in the class BertTokenizer tokenizes the input text based on the
        specified tokenizer configurations.

        Args:
            self (object): The instance of the BertTokenizer class.
            text (str): The input text to be tokenized.
            split_special_tokens (bool):
                A flag indicating whether special tokens should be split or not. Default is False.
                If set to True, special tokens will be split.

        Returns:
            list: A list of tokens resulting from tokenizing the input text.
                If the basic tokenization is enabled, the tokens are split further using the wordpiece tokenizer.

        Raises:
            None
        """
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(
                text, never_split=self.all_special_tokens if not split_special_tokens else None
            ):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        # WordPiece continuation markers ("##") are stripped when joining.
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # Mask mirrors build_inputs_with_special_tokens: [CLS] A [SEP] (B [SEP]).
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary of the tokenizer to a file.

        Args:
            self: An instance of the BertTokenizer class.
            save_directory (str): The directory where the vocabulary file will be saved. It can be an existing directory or a file path.
            filename_prefix (Optional[str]): An optional prefix to be added to the vocabulary file name. Default is None.

        Returns:
            Tuple[str]: A tuple containing the path to the saved vocabulary file.

        Raises:
            OSError: If there is an issue with accessing or writing to the save_directory.
            UnicodeEncodeError: If there is an issue encoding the vocabulary file with 'utf-8'.

        The method saves the vocabulary of the tokenizer to a file in the specified save_directory. If save_directory is a directory, the vocabulary file will be saved with the default name (or with the
        filename_prefix if provided) in the directory. If save_directory is a file path, the vocabulary file will be saved with the same name as the file in the specified path.

        The vocabulary is saved in a newline-separated format, where each line contains a token from the vocabulary. The tokens are sorted based on their token_index in the vocabulary dictionary. If the token
        indices are not consecutive, a warning message is logged.

        Example:
            ```python
            >>> tokenizer = BertTokenizer()
            >>> save_directory = '/path/to/save'
            >>> filename_prefix = 'my-vocab'
            >>> saved_file = tokenizer.save_vocabulary(save_directory, filename_prefix)
            ```
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            # Tokens are written one per line in index order; line number encodes the id.
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case property

This method 'do_lower_case' is a property in the class 'BertTokenizer' and returns the value of the 'do_lower_case' property of the 'basic_tokenizer' attribute.

PARAMETER DESCRIPTION
self

The instance of the BertTokenizer class.

RETURNS DESCRIPTION

bool: The `do_lower_case` setting of the underlying basic tokenizer.

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size property

Method to retrieve the size of the vocabulary used by the BertTokenizer.

PARAMETER DESCRIPTION
self

The instance of the BertTokenizer class. This parameter is used to access the vocabulary stored within the BertTokenizer instance.

TYPE: BertTokenizer

RETURNS DESCRIPTION
int

The number of unique tokens in the vocabulary of the BertTokenizer. This value represents the size of the vocabulary used by the tokenizer.

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer.__init__(vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs)

This method initializes a BertTokenizer object.

PARAMETER DESCRIPTION
self

The instance of the class.

vocab_file

The path to the vocabulary file.

TYPE: str

do_lower_case

Whether to convert tokens to lowercase. Default is True.

TYPE: bool DEFAULT: True

do_basic_tokenize

Whether to perform basic tokenization. Default is True.

TYPE: bool DEFAULT: True

never_split

List of tokens that should not be split further.

TYPE: list DEFAULT: None

unk_token

The unknown token representation. Default is '[UNK]'.

TYPE: str DEFAULT: '[UNK]'

sep_token

The separator token. Default is '[SEP]'.

TYPE: str DEFAULT: '[SEP]'

pad_token

The padding token. Default is '[PAD]'.

TYPE: str DEFAULT: '[PAD]'

cls_token

The classification token. Default is '[CLS]'.

TYPE: str DEFAULT: '[CLS]'

mask_token

The masking token. Default is '[MASK]'.

TYPE: str DEFAULT: '[MASK]'

tokenize_chinese_chars

Whether to tokenize Chinese characters. Default is True.

TYPE: bool DEFAULT: True

strip_accents

Method to strip accents. None by default.

TYPE: str DEFAULT: None

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
ValueError

If the vocab_file path is invalid or the file does not exist.

Exception

Any unexpected errors that may occur during the initialization process.

Source code in mindnlp\transformers\models\bert\tokenization_bert.py
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
def __init__(
    self,
    vocab_file,
    do_lower_case=True,
    do_basic_tokenize=True,
    never_split=None,
    unk_token="[UNK]",
    sep_token="[SEP]",
    pad_token="[PAD]",
    cls_token="[CLS]",
    mask_token="[MASK]",
    tokenize_chinese_chars=True,
    strip_accents=None,
    **kwargs,
):
    """
    Build a BertTokenizer from a plain-text vocabulary file.

    Args:
        self: The instance of the class.
        vocab_file (str): Path to the vocabulary file; must exist on disk.
        do_lower_case (bool, optional): Lowercase input during basic tokenization. Default is True.
        do_basic_tokenize (bool, optional): Run the BasicTokenizer pass before WordPiece. Default is True.
        never_split (list, optional): Tokens the basic tokenizer must keep intact.
        unk_token (str, optional): Token substituted for out-of-vocabulary items. Default is '[UNK]'.
        sep_token (str, optional): Sequence-separator token. Default is '[SEP]'.
        pad_token (str, optional): Padding token. Default is '[PAD]'.
        cls_token (str, optional): Classification token. Default is '[CLS]'.
        mask_token (str, optional): Masked-language-model token. Default is '[MASK]'.
        tokenize_chinese_chars (bool, optional): Split around CJK characters. Default is True.
        strip_accents (str, optional): Accent-stripping behaviour; None by default.

    Returns:
        None.

    Raises:
        ValueError: If `vocab_file` does not point to an existing file.
    """
    # Fail fast with a helpful hint when the vocabulary file is missing.
    if not os.path.isfile(vocab_file):
        raise ValueError(
            f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
            " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
        )
    self.vocab = load_vocab(vocab_file)
    # Reverse mapping (id -> token), preserving the vocabulary's order.
    self.ids_to_tokens = collections.OrderedDict(
        (token_id, token) for token, token_id in self.vocab.items()
    )
    self.do_basic_tokenize = do_basic_tokenize
    if do_basic_tokenize:
        self.basic_tokenizer = BasicTokenizer(
            do_lower_case=do_lower_case,
            never_split=never_split,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
        )

    self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

    super().__init__(
        do_lower_case=do_lower_case,
        do_basic_tokenize=do_basic_tokenize,
        never_split=never_split,
        unk_token=unk_token,
        sep_token=sep_token,
        pad_token=pad_token,
        cls_token=cls_token,
        mask_token=mask_token,
        tokenize_chinese_chars=tokenize_chinese_chars,
        strip_accents=strip_accents,
        **kwargs,
    )

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens(token_ids_0, token_ids_1=None)

Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:

  • single sequence: [CLS] X [SEP]
  • pair of sequences: [CLS] A [SEP] B [SEP]
PARAMETER DESCRIPTION
token_ids_0

List of IDs to which the special tokens will be added.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

RETURNS DESCRIPTION
List[int]

List[int]: List of input IDs with the appropriate special tokens.

Source code in mindnlp\transformers\models\bert\tokenization_bert.py
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
def build_inputs_with_special_tokens(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Add BERT special tokens around one sequence or a pair of sequences.

    The resulting format is:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
    """
    cls_ids = [self.cls_token_id]
    sep_ids = [self.sep_token_id]
    result = cls_ids + token_ids_0 + sep_ids
    if token_ids_1 is not None:
        result = result + token_ids_1 + sep_ids
    return result

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string(tokens)

Converts a sequence of tokens (strings) into a single string.

Source code in mindnlp\transformers\models\bert\tokenization_bert.py
346
347
348
349
def convert_tokens_to_string(self, tokens):
    """Join wordpiece tokens into a single string, merging '##' continuations."""
    joined = " ".join(tokens)
    # Wordpiece continuation markers (" ##") glue a subword to its predecessor.
    return joined.replace(" ##", "").strip()

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences(token_ids_0, token_ids_1=None)

Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format:

0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence    | second sequence |

If token_ids_1 is None, this method only returns the first portion of the mask (0s).

PARAMETER DESCRIPTION
token_ids_0

List of IDs.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

RETURNS DESCRIPTION
List[int]

List[int]: List of token type IDs according to the given sequence(s).

Source code in mindnlp\transformers\models\bert\tokenization_bert.py
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
def create_token_type_ids_from_sequences(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Create the token-type-id mask used by sequence-pair classification. The BERT
    layout is:

    ```
    0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
    | first sequence    | second sequence |
    ```

    If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
    """
    # [CLS] + tokens_0 + [SEP] all belong to segment 0.
    first_segment = [0] * (len(token_ids_0) + 2)
    if token_ids_1 is None:
        return first_segment
    # tokens_1 + trailing [SEP] belong to segment 1.
    return first_segment + [1] * (len(token_ids_1) + 1)

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask(token_ids_0, token_ids_1=None, already_has_special_tokens=False)

Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer prepare_for_model method.

PARAMETER DESCRIPTION
token_ids_0

List of IDs.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

already_has_special_tokens

Whether or not the token list is already formatted with special tokens for the model.

TYPE: `bool`, *optional*, defaults to `False` DEFAULT: False

RETURNS DESCRIPTION
List[int]

List[int]: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.

Source code in mindnlp\transformers\models\bert\tokenization_bert.py
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
def get_special_tokens_mask(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
    """
    Mark special-token positions in a sequence that has no special tokens added yet.
    Called when adding special tokens via the tokenizer `prepare_for_model` method.

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the token list is already formatted with special tokens for the model.

    Returns:
        `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
    """
    if already_has_special_tokens:
        # The sequence already carries its special tokens; defer to the base class.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )

    # [CLS] tokens_0 [SEP] — then optionally tokens_1 [SEP].
    mask = [1] + [0] * len(token_ids_0) + [1]
    if token_ids_1 is not None:
        mask = mask + [0] * len(token_ids_1) + [1]
    return mask

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab()

Retrieve the vocabulary of the BertTokenizer including any added tokens.

PARAMETER DESCRIPTION
self

An instance of the BertTokenizer class. It represents the tokenizer object.

TYPE: BertTokenizer

RETURNS DESCRIPTION
dict

A dictionary containing the vocabulary of the BertTokenizer, including any added tokens.

Source code in mindnlp\transformers\models\bert\tokenization_bert.py
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
def get_vocab(self):
    """
    Return the full vocabulary of the tokenizer as a dict.

    Args:
        self (BertTokenizer): The tokenizer instance.

    Returns:
        dict: The base vocabulary merged with any added tokens; added-token
            entries override base entries with the same key.

    Raises:
        None.
    """
    merged = dict(self.vocab)
    merged.update(self.added_tokens_encoder)
    return merged

mindnlp.transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary(save_directory, filename_prefix=None)

Save the vocabulary of the tokenizer to a file.

PARAMETER DESCRIPTION
self

An instance of the BertTokenizer class.

save_directory

The directory where the vocabulary file will be saved. It can be an existing directory or a file path.

TYPE: str

filename_prefix

An optional prefix to be added to the vocabulary file name. Default is None.

TYPE: Optional[str] DEFAULT: None

RETURNS DESCRIPTION
Tuple[str]

Tuple[str]: A tuple containing the path to the saved vocabulary file.

RAISES DESCRIPTION
OSError

If there is an issue with accessing or writing to the save_directory.

UnicodeEncodeError

If there is an issue encoding the vocabulary file with 'utf-8'.

The method saves the vocabulary of the tokenizer to a file in the specified save_directory. If save_directory is a directory, the vocabulary file will be saved with the default name (or with the filename_prefix if provided) in the directory. If save_directory is a file path, the vocabulary is written to that path, with the filename_prefix prepended to the file name if one is provided.

The vocabulary is saved in a newline-separated format, where each line contains a token from the vocabulary. The tokens are sorted based on their token_index in the vocabulary dictionary. If the token indices are not consecutive, a warning message is logged.

Example
>>> tokenizer = BertTokenizer()
>>> save_directory = '/path/to/save'
>>> filename_prefix = 'my-vocab'
>>> saved_file = tokenizer.save_vocabulary(save_directory, filename_prefix)
Source code in mindnlp\transformers\models\bert\tokenization_bert.py
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """
    Save the tokenizer's vocabulary to disk, one token per line.

    Args:
        self: An instance of the BertTokenizer class.
        save_directory (str): Target directory (the default vocab file name is used,
            optionally prefixed) or an explicit file path.
        filename_prefix (Optional[str]): Optional prefix for the vocabulary file name. Default is None.

    Returns:
        Tuple[str]: A one-element tuple containing the path of the written vocabulary file.

    Raises:
        OSError: If the target path cannot be opened or written.
        UnicodeEncodeError: If a token cannot be encoded as 'utf-8'.

    Tokens are written ordered by their index; if the indices are not consecutive
    a warning is logged, since that usually indicates a corrupted vocabulary.

    Example:
        ```python
        >>> tokenizer = BertTokenizer()
        >>> save_directory = '/path/to/save'
        >>> filename_prefix = 'my-vocab'
        >>> saved_file = tokenizer.save_vocabulary(save_directory, filename_prefix)
        ```
    """
    prefix = filename_prefix + "-" if filename_prefix else ""
    if os.path.isdir(save_directory):
        vocab_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES["vocab_file"])
    else:
        vocab_file = prefix + save_directory

    expected_index = 0
    with open(vocab_file, "w", encoding="utf-8") as writer:
        # Emit tokens in index order; a gap means the vocab mapping is suspect.
        for token, token_index in sorted(self.vocab.items(), key=lambda item: item[1]):
            if expected_index != token_index:
                logger.warning(
                    f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                    " Please check that the vocabulary is not corrupted!"
                )
                expected_index = token_index
            writer.write(token + "\n")
            expected_index += 1
    return (vocab_file,)

mindnlp.transformers.models.bert.tokenization_bert_fast.BertTokenizerFast

Bases: PreTrainedTokenizerFast

Construct a "fast" BERT tokenizer (backed by HuggingFace's tokenizers library). Based on WordPiece.

This tokenizer inherits from [PreTrainedTokenizerFast] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.

PARAMETER DESCRIPTION
vocab_file

File containing the vocabulary.

TYPE: `str` DEFAULT: None

do_lower_case

Whether or not to lowercase the input when tokenizing.

TYPE: `bool`, *optional*, defaults to `True` DEFAULT: True

unk_token

The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.

TYPE: `str`, *optional*, defaults to `"[UNK]"` DEFAULT: '[UNK]'

sep_token

The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.

TYPE: `str`, *optional*, defaults to `"[SEP]"` DEFAULT: '[SEP]'

pad_token

The token used for padding, for example when batching sequences of different lengths.

TYPE: `str`, *optional*, defaults to `"[PAD]"` DEFAULT: '[PAD]'

cls_token

The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.

TYPE: `str`, *optional*, defaults to `"[CLS]"` DEFAULT: '[CLS]'

mask_token

The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.

TYPE: `str`, *optional*, defaults to `"[MASK]"` DEFAULT: '[MASK]'

clean_text

Whether or not to clean the text before tokenization by removing any control characters and replacing all whitespaces by the classic one.

TYPE: `bool`, *optional*, defaults to `True`

tokenize_chinese_chars

Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this issue).

TYPE: `bool`, *optional*, defaults to `True` DEFAULT: True

strip_accents

Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for lowercase (as in the original BERT).

TYPE: `bool`, *optional* DEFAULT: None

wordpieces_prefix

The prefix for subwords.

TYPE: `str`, *optional*, defaults to `"##"`

Source code in mindnlp\transformers\models\bert\tokenization_bert_fast.py
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        clean_text (`bool`, *optional*, defaults to `True`):
            Whether or not to clean the text before tokenization by removing any control characters and replacing all
            whitespaces by the classic one.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
            issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
            The prefix for subwords.
    """
    # Class-level wiring consumed by the PreTrainedTokenizerFast machinery:
    # expected vocab file names, pretrained download maps, per-checkpoint
    # init configs, model input-size limits, and the matching slow tokenizer.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """
        Initialize the BertTokenizerFast class.

        Args:
            self: The instance of the class.
            vocab_file (str): The file path to the vocabulary file. Defaults to None.
            tokenizer_file (str): The file path to the tokenizer file. Defaults to None.
            do_lower_case (bool): Flag indicating whether to convert tokens to lowercase. Defaults to True.
            unk_token (str): The special token for unknown tokens. Defaults to '[UNK]'.
            sep_token (str): The special token for separating sequences. Defaults to '[SEP]'.
            pad_token (str): The special token for padding sequences. Defaults to '[PAD]'.
            cls_token (str): The special token for classifying sequences. Defaults to '[CLS]'.
            mask_token (str): The special token for masking tokens. Defaults to '[MASK]'.
            tokenize_chinese_chars (bool): Flag indicating whether to tokenize Chinese characters. Defaults to True.
            strip_accents (str or None): Flag indicating whether to strip accents. Defaults to None.
            **kwargs: Additional keyword arguments.

        Returns:
            None.

        Raises:
            Exception: If an error occurs during the initialization process.
        """
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # The serialized backend tokenizer may have been saved with different
        # normalizer options; if lowercase / strip_accents / handle_chinese_chars
        # disagree with the arguments, rebuild the backend normalizer so that it
        # matches the state requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary of the BertTokenizerFast model to the specified directory.

        Args:
            self (BertTokenizerFast): The instance of the BertTokenizerFast class.
            save_directory (str): The directory where the vocabulary files will be saved.
            filename_prefix (Optional[str]): An optional prefix for the saved vocabulary files. Defaults to None.

        Returns:
            Tuple[str]: A tuple containing the names of the saved files.

        Raises:
            This method does not explicitly raise any exceptions.
        """
        # Delegate to the Rust backend's model, which writes the file(s) itself.
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

mindnlp.transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.__init__(vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs)

Initialize the BertTokenizerFast class.

PARAMETER DESCRIPTION
self

The instance of the class.

vocab_file

The file path to the vocabulary file. Defaults to None.

TYPE: str DEFAULT: None

tokenizer_file

The file path to the tokenizer file. Defaults to None.

TYPE: str DEFAULT: None

do_lower_case

Flag indicating whether to convert tokens to lowercase. Defaults to True.

TYPE: bool DEFAULT: True

unk_token

The special token for unknown tokens. Defaults to '[UNK]'.

TYPE: str DEFAULT: '[UNK]'

sep_token

The special token for separating sequences. Defaults to '[SEP]'.

TYPE: str DEFAULT: '[SEP]'

pad_token

The special token for padding sequences. Defaults to '[PAD]'.

TYPE: str DEFAULT: '[PAD]'

cls_token

The special token for classifying sequences. Defaults to '[CLS]'.

TYPE: str DEFAULT: '[CLS]'

mask_token

The special token for masking tokens. Defaults to '[MASK]'.

TYPE: str DEFAULT: '[MASK]'

tokenize_chinese_chars

Flag indicating whether to tokenize Chinese characters. Defaults to True.

TYPE: bool DEFAULT: True

strip_accents

Flag indicating whether to strip accents. Defaults to None.

TYPE: str or None DEFAULT: None

**kwargs

Additional keyword arguments.

DEFAULT: {}

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
Exception

If an error occurs during the initialization process.

Source code in mindnlp\transformers\models\bert\tokenization_bert_fast.py
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
def __init__(
    self,
    vocab_file=None,
    tokenizer_file=None,
    do_lower_case=True,
    unk_token="[UNK]",
    sep_token="[SEP]",
    pad_token="[PAD]",
    cls_token="[CLS]",
    mask_token="[MASK]",
    tokenize_chinese_chars=True,
    strip_accents=None,
    **kwargs,
):
    """
    Create a fast BERT tokenizer and reconcile the backend normalizer state.

    Args:
        self: The instance of the class.
        vocab_file (str): Path to the vocabulary file. Defaults to None.
        tokenizer_file (str): Path to a serialized tokenizer file. Defaults to None.
        do_lower_case (bool): Lowercase input when tokenizing. Defaults to True.
        unk_token (str): Token for unknown items. Defaults to '[UNK]'.
        sep_token (str): Sequence-separator token. Defaults to '[SEP]'.
        pad_token (str): Padding token. Defaults to '[PAD]'.
        cls_token (str): Classification token. Defaults to '[CLS]'.
        mask_token (str): Masking token. Defaults to '[MASK]'.
        tokenize_chinese_chars (bool): Split around CJK characters. Defaults to True.
        strip_accents (str or None): Accent-stripping behaviour. Defaults to None.
        **kwargs: Additional keyword arguments forwarded to the base class.

    Returns:
        None.

    Raises:
        Exception: If an error occurs during the initialization process.
    """
    super().__init__(
        vocab_file,
        tokenizer_file=tokenizer_file,
        do_lower_case=do_lower_case,
        unk_token=unk_token,
        sep_token=sep_token,
        pad_token=pad_token,
        cls_token=cls_token,
        mask_token=mask_token,
        tokenize_chinese_chars=tokenize_chinese_chars,
        strip_accents=strip_accents,
        **kwargs,
    )

    # The serialized backend may carry normalizer options that differ from the
    # arguments; if so, rebuild the normalizer with the requested settings.
    norm_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
    wanted = {
        "lowercase": do_lower_case,
        "strip_accents": strip_accents,
        "handle_chinese_chars": tokenize_chinese_chars,
    }
    if any(norm_state.get(key, value) != value for key, value in wanted.items()):
        normalizer_cls = getattr(normalizers, norm_state.pop("type"))
        norm_state.update(wanted)
        self.backend_tokenizer.normalizer = normalizer_cls(**norm_state)

    self.do_lower_case = do_lower_case

mindnlp.transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens(token_ids_0, token_ids_1=None)

Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format:

  • single sequence: [CLS] X [SEP]
  • pair of sequences: [CLS] A [SEP] B [SEP]
PARAMETER DESCRIPTION
token_ids_0

List of IDs to which the special tokens will be added.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

RETURNS DESCRIPTION

List[int]: List of input IDs with the appropriate special tokens.

Source code in mindnlp\transformers\models\bert\tokenization_bert_fast.py
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
def build_inputs_with_special_tokens(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
    and adding special tokens. A BERT sequence has the following format:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
    """
    # First segment is always wrapped as [CLS] ... [SEP].
    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

    # A second segment, when present, is appended with its own trailing [SEP].
    if token_ids_1 is not None:
        output += token_ids_1 + [self.sep_token_id]

    return output

mindnlp.transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences(token_ids_0, token_ids_1=None)

Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format:

0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence    | second sequence |

If token_ids_1 is None, this method only returns the first portion of the mask (0s).

PARAMETER DESCRIPTION
token_ids_0

List of IDs.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

RETURNS DESCRIPTION
List[int]

List[int]: List of token type IDs according to the given sequence(s).

Source code in mindnlp\transformers\models\bert\tokenization_bert_fast.py
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
def create_token_type_ids_from_sequences(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
    pair mask has the following format:

    ```
    0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
    | first sequence    | second sequence |
    ```

    If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
    """
    # First segment spans [CLS] + token_ids_0 + [SEP], hence the +2.
    first_segment_len = len(token_ids_0) + 2
    if token_ids_1 is None:
        return [0] * first_segment_len
    # Second segment spans token_ids_1 + its trailing [SEP], hence the +1.
    return [0] * first_segment_len + [1] * (len(token_ids_1) + 1)

mindnlp.transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary(save_directory, filename_prefix=None)

Save the vocabulary of the BertTokenizerFast model to the specified directory.

PARAMETER DESCRIPTION
self

The instance of the BertTokenizerFast class.

TYPE: BertTokenizerFast

save_directory

The directory where the vocabulary files will be saved.

TYPE: str

filename_prefix

An optional prefix for the saved vocabulary files. Defaults to None.

TYPE: Optional[str] DEFAULT: None

RETURNS DESCRIPTION
Tuple[str]

Tuple[str]: A tuple containing the names of the saved files.

Source code in mindnlp\transformers\models\bert\tokenization_bert_fast.py
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """
    Save the vocabulary of this tokenizer's backend model to disk.

    Delegates to the underlying fast-tokenizer model's `save`, which writes the
    vocabulary file(s) into `save_directory`, optionally prefixing their names.

    Args:
        save_directory (str): The directory where the vocabulary files will be saved.
        filename_prefix (Optional[str]): An optional prefix for the saved vocabulary files. Defaults to None.

    Returns:
        Tuple[str]: The paths of the files that were written.
    """
    return tuple(self._tokenizer.model.save(save_directory, name=filename_prefix))