xlnet

mindnlp.transformers.models.xlnet.modeling_xlnet

MindSpore XLNet model.

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoice

Bases: XLNetPreTrainedModel

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
class XLNetForMultipleChoice(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, 1)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        input_mask: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        mems: Optional[mindspore.Tensor] = None,
        perm_mask: Optional[mindspore.Tensor] = None,
        target_mapping: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete when `use_cache` is removed in XLNetModel
    ) -> Union[Tuple, XLNetForMultipleChoiceOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        flat_input_ids = input_ids.view(-1, input_ids.shape[-1]) if input_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) if attention_mask is not None else None
        flat_input_mask = input_mask.view(-1, input_mask.shape[-1]) if input_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.shape[-2], inputs_embeds.shape[-1])
            if inputs_embeds is not None
            else None
        )

        transformer_outputs = self.transformer(
            flat_input_ids,
            token_type_ids=flat_token_type_ids,
            input_mask=flat_input_mask,
            attention_mask=flat_attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            head_mask=head_mask,
            inputs_embeds=flat_inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )

        output = transformer_outputs[0]

        output = self.sequence_summary(output)
        logits = self.logits_proj(output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels.view(-1))

        if not return_dict:
            output = (reshaped_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return XLNetForMultipleChoiceOutput(
            loss=loss,
            logits=reshaped_logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoice.forward(input_ids=None, token_type_ids=None, input_mask=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, head_mask=None, inputs_embeds=None, labels=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs)

labels (mindspore.Tensor of shape (batch_size,), optional): Labels for computing the multiple choice classification loss. Indices should be in [0, ..., num_choices-1] where num_choices is the size of the second dimension of the input tensors. (See input_ids above)
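
A minimal usage sketch (hedged): it assumes the `xlnet/xlnet-base-cased` checkpoint used in the question-answering example further down this page and that the MindNLP tokenizer accepts `return_tensors="ms"`; the prompt/choice strings are illustrative only. Inputs for multiple choice are shaped `(batch_size, num_choices, sequence_length)`.

>>> from mindnlp.transformers import AutoTokenizer, XLNetForMultipleChoice
>>> import mindspore

>>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> model = XLNetForMultipleChoice.from_pretrained("xlnet/xlnet-base-cased")

>>> prompt = "In Italy, pizza served in formal settings is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."

>>> # encode (prompt, choice) pairs, then add the batch dimension: (1, num_choices, seq_len)
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="ms", padding=True)
>>> inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}
>>> labels = mindspore.tensor([0])  # index of the correct choice

>>> outputs = model(**inputs, labels=labels)
>>> loss, logits = outputs.loss, outputs.logits  # logits has shape (batch_size, num_choices)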

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    input_mask: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    mems: Optional[mindspore.Tensor] = None,
    perm_mask: Optional[mindspore.Tensor] = None,
    target_mapping: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    use_mems: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    **kwargs,  # delete when `use_cache` is removed in XLNetModel
) -> Union[Tuple, XLNetForMultipleChoiceOutput]:
    r"""
    labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
        num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
        `input_ids` above)
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

    flat_input_ids = input_ids.view(-1, input_ids.shape[-1]) if input_ids is not None else None
    flat_token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
    flat_attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) if attention_mask is not None else None
    flat_input_mask = input_mask.view(-1, input_mask.shape[-1]) if input_mask is not None else None
    flat_inputs_embeds = (
        inputs_embeds.view(-1, inputs_embeds.shape[-2], inputs_embeds.shape[-1])
        if inputs_embeds is not None
        else None
    )

    transformer_outputs = self.transformer(
        flat_input_ids,
        token_type_ids=flat_token_type_ids,
        input_mask=flat_input_mask,
        attention_mask=flat_attention_mask,
        mems=mems,
        perm_mask=perm_mask,
        target_mapping=target_mapping,
        head_mask=head_mask,
        inputs_embeds=flat_inputs_embeds,
        use_mems=use_mems,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        **kwargs,
    )

    output = transformer_outputs[0]

    output = self.sequence_summary(output)
    logits = self.logits_proj(output)
    reshaped_logits = logits.view(-1, num_choices)

    loss = None
    if labels is not None:
        loss_fct = CrossEntropyLoss()
        loss = loss_fct(reshaped_logits, labels.view(-1))

    if not return_dict:
        output = (reshaped_logits,) + transformer_outputs[1:]
        return ((loss,) + output) if loss is not None else output

    return XLNetForMultipleChoiceOutput(
        loss=loss,
        logits=reshaped_logits,
        mems=transformer_outputs.mems,
        hidden_states=transformer_outputs.hidden_states,
        attentions=transformer_outputs.attentions,
    )

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput dataclass

Bases: ModelOutput

Output type of [XLNetForMultipleChoice].

PARAMETER DESCRIPTION
loss

Classification loss.

TYPE: `mindspore.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided DEFAULT: None

logits

num_choices is the second dimension of the input tensors. (see input_ids above).

Classification scores (before SoftMax).

TYPE: `mindspore.Tensor` of shape `(batch_size, num_choices)` DEFAULT: None

mems

Contains pre-computed hidden-states. Can be used (see mems input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input_ids as they have already been computed.

TYPE: `List[mindspore.Tensor]` of length `config.n_layers` DEFAULT: None

hidden_states

Tuple of mindspore.Tensor (one for the output of the embeddings + one for the output of each layer) of shape (batch_size, sequence_length, hidden_size).

Hidden-states of the model at the output of each layer plus the initial embedding outputs.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True` DEFAULT: None

attentions

Tuple of mindspore.Tensor (one for each layer) of shape (batch_size, num_heads, sequence_length, sequence_length).

Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True` DEFAULT: None

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
@dataclass
class XLNetForMultipleChoiceOutput(ModelOutput):
    """
    Output type of [`XLNetForMultipleChoice`].

    Args:
        loss (`mindspore.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
            Classification loss.
        logits (`mindspore.Tensor` of shape `(batch_size, num_choices)`):
            *num_choices* is the second dimension of the input tensors. (see *input_ids* above).

            Classification scores (before SoftMax).
        mems (`List[mindspore.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
            token ids which have their past given to this model should not be passed as `input_ids` as they have
            already been computed.
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `mindspore.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `mindspore.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[mindspore.Tensor] = None
    logits: mindspore.Tensor = None
    mems: Optional[List[mindspore.Tensor]] = None
    hidden_states: Optional[Tuple[mindspore.Tensor, ...]] = None
    attentions: Optional[Tuple[mindspore.Tensor, ...]] = None
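
As the forward code above shows, the same fields are exposed either as this dataclass (when `return_dict=True`) or as a plain tuple `(loss, logits, mems, hidden_states, attentions)`, with `loss` present only when `labels` is given. A short sketch, reusing the model, inputs, and labels from the multiple-choice example above:

>>> out = model(**inputs, labels=labels)                  # XLNetForMultipleChoiceOutput
>>> out.logits.shape                                      # (batch_size, num_choices)
>>> tup = model(**inputs, labels=labels, return_dict=False)
>>> loss, reshaped_logits = tup[0], tup[1]                # remaining entries: mems, hidden_states, attentions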

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnswering

Bases: XLNetPreTrainedModel

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.transformer = XLNetModel(config)
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        mems: Optional[mindspore.Tensor] = None,
        perm_mask: Optional[mindspore.Tensor] = None,
        target_mapping: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        input_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        start_positions: Optional[mindspore.Tensor] = None,
        end_positions: Optional[mindspore.Tensor] = None,
        is_impossible: Optional[mindspore.Tensor] = None,
        cls_index: Optional[mindspore.Tensor] = None,
        p_mask: Optional[mindspore.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete when `use_cache` is removed in XLNetModel
    ) -> Union[Tuple, XLNetForQuestionAnsweringOutput]:
        r"""
        start_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        is_impossible (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels whether a question has an answer or no answer (SQuAD 2.0)
        cls_index (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the classification token to use as input for computing plausibility of the
            answer.
        p_mask (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
            masked. 0.0 mean token is not masked.

        Returns:

        Example:

        ```python
        >>> from mindnlp.transformers import AutoTokenizer, XLNetForQuestionAnswering
        >>> import mindspore

        >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
        >>> model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased")

        >>> input_ids = mindspore.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
        ...     0
        ... )  # Batch size 1
        >>> start_positions = mindspore.tensor([1])
        >>> end_positions = mindspore.tensor([3])
        >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)

        >>> loss = outputs.loss
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )
        hidden_states = transformer_outputs[0]
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            if not return_dict:
                return (total_loss,) + transformer_outputs[1:]
            else:
                return XLNetForQuestionAnsweringOutput(
                    loss=total_loss,
                    mems=transformer_outputs.mems,
                    hidden_states=transformer_outputs.hidden_states,
                    attentions=transformer_outputs.attentions,
                )

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.shape
            start_log_probs = nn.functional.softmax(start_logits, dim=-1)  # shape (bsz, slen)

            start_top_log_probs, start_top_index = ops.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).broadcast_to((-1, -1, hsz))  # shape (bsz, start_n_top, hsz)
            start_states = ops.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).broadcast_to((-1, slen, -1, -1))  # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = nn.functional.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = ops.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = ops.einsum(
                "blh,bl->bh", hidden_states, start_log_probs
            )  # get the representation of START as weighted sum of hidden states
            cls_logits = self.answer_class(
                hidden_states, start_states=start_states, cls_index=cls_index
            )  # Shape (batch size,): one single `cls_logits` for each sample

            if not return_dict:
                outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
                return outputs + transformer_outputs[1:]
            else:
                return XLNetForQuestionAnsweringOutput(
                    start_top_log_probs=start_top_log_probs,
                    start_top_index=start_top_index,
                    end_top_log_probs=end_top_log_probs,
                    end_top_index=end_top_index,
                    cls_logits=cls_logits,
                    mems=transformer_outputs.mems,
                    hidden_states=transformer_outputs.hidden_states,
                    attentions=transformer_outputs.attentions,
                )

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnswering.forward(input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, is_impossible=None, cls_index=None, p_mask=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs)

start_positions (mindspore.Tensor of shape (batch_size,), optional): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (sequence_length). Positions outside of the sequence are not taken into account for computing the loss.

end_positions (mindspore.Tensor of shape (batch_size,), optional): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (sequence_length). Positions outside of the sequence are not taken into account for computing the loss.

is_impossible (mindspore.Tensor of shape (batch_size,), optional): Labels for whether a question has an answer or no answer (SQuAD 2.0).

cls_index (mindspore.Tensor of shape (batch_size,), optional): Labels for position (index) of the classification token to use as input for computing the plausibility of the answer.

p_mask (mindspore.Tensor of shape (batch_size, sequence_length), optional): Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means the token should be masked; 0.0 means the token is not masked.

Returns:

Example:

>>> from mindnlp.transformers import AutoTokenizer, XLNetForQuestionAnswering
>>> import mindspore

>>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased")

>>> input_ids = mindspore.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
...     0
... )  # Batch size 1
>>> start_positions = mindspore.tensor([1])
>>> end_positions = mindspore.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)

>>> loss = outputs.loss
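
When start_positions and end_positions are omitted, the forward code above takes the beam-search inference branch and returns top start/end candidates instead of a loss. A short sketch continuing from the example, with output fields and shapes as documented in XLNetForQuestionAnsweringOutput below:

>>> outputs = model(input_ids)
>>> outputs.start_top_log_probs.shape  # (batch_size, config.start_n_top)
>>> outputs.start_top_index.shape      # (batch_size, config.start_n_top)
>>> outputs.end_top_log_probs.shape    # (batch_size, config.start_n_top * config.end_n_top)
>>> outputs.cls_logits.shape           # (batch_size,)
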
Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    mems: Optional[mindspore.Tensor] = None,
    perm_mask: Optional[mindspore.Tensor] = None,
    target_mapping: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    input_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    start_positions: Optional[mindspore.Tensor] = None,
    end_positions: Optional[mindspore.Tensor] = None,
    is_impossible: Optional[mindspore.Tensor] = None,
    cls_index: Optional[mindspore.Tensor] = None,
    p_mask: Optional[mindspore.Tensor] = None,
    use_mems: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    **kwargs,  # delete when `use_cache` is removed in XLNetModel
) -> Union[Tuple, XLNetForQuestionAnsweringOutput]:
    r"""
    start_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for position (index) of the start of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
        are not taken into account for computing the loss.
    end_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for position (index) of the end of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
        are not taken into account for computing the loss.
    is_impossible (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels whether a question has an answer or no answer (SQuAD 2.0)
    cls_index (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for position (index) of the classification token to use as input for computing plausibility of the
        answer.
    p_mask (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
        masked. 0.0 mean token is not masked.

    Returns:

    Example:

    ```python
    >>> from mindnlp.transformers import AutoTokenizer, XLNetForQuestionAnswering
    >>> import mindspore

    >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
    >>> model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased")

    >>> input_ids = mindspore.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
    ...     0
    ... )  # Batch size 1
    >>> start_positions = mindspore.tensor([1])
    >>> end_positions = mindspore.tensor([3])
    >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)

    >>> loss = outputs.loss
    ```"""
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    transformer_outputs = self.transformer(
        input_ids,
        attention_mask=attention_mask,
        mems=mems,
        perm_mask=perm_mask,
        target_mapping=target_mapping,
        token_type_ids=token_type_ids,
        input_mask=input_mask,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        use_mems=use_mems,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        **kwargs,
    )
    hidden_states = transformer_outputs[0]
    start_logits = self.start_logits(hidden_states, p_mask=p_mask)

    outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

    if start_positions is not None and end_positions is not None:
        # If we are on multi-GPU, let's remove the dimension added by batch splitting
        for x in (start_positions, end_positions, cls_index, is_impossible):
            if x is not None and x.dim() > 1:
                x.squeeze_(-1)

        # during training, compute the end logits based on the ground truth of the start position
        end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

        loss_fct = CrossEntropyLoss()
        start_loss = loss_fct(start_logits, start_positions)
        end_loss = loss_fct(end_logits, end_positions)
        total_loss = (start_loss + end_loss) / 2

        if cls_index is not None and is_impossible is not None:
            # Predict answerability from the representation of CLS and START
            cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
            loss_fct_cls = nn.BCEWithLogitsLoss()
            cls_loss = loss_fct_cls(cls_logits, is_impossible)

            # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
            total_loss += cls_loss * 0.5

        if not return_dict:
            return (total_loss,) + transformer_outputs[1:]
        else:
            return XLNetForQuestionAnsweringOutput(
                loss=total_loss,
                mems=transformer_outputs.mems,
                hidden_states=transformer_outputs.hidden_states,
                attentions=transformer_outputs.attentions,
            )

    else:
        # during inference, compute the end logits based on beam search
        bsz, slen, hsz = hidden_states.shape
        start_log_probs = nn.functional.softmax(start_logits, dim=-1)  # shape (bsz, slen)

        start_top_log_probs, start_top_index = ops.topk(
            start_log_probs, self.start_n_top, dim=-1
        )  # shape (bsz, start_n_top)
        start_top_index_exp = start_top_index.unsqueeze(-1).broadcast_to((-1, -1, hsz))  # shape (bsz, start_n_top, hsz)
        start_states = ops.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
        start_states = start_states.unsqueeze(1).broadcast_to((-1, slen, -1, -1))  # shape (bsz, slen, start_n_top, hsz)

        hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
            start_states
        )  # shape (bsz, slen, start_n_top, hsz)
        p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
        end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
        end_log_probs = nn.functional.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)

        end_top_log_probs, end_top_index = ops.topk(
            end_log_probs, self.end_n_top, dim=1
        )  # shape (bsz, end_n_top, start_n_top)
        end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
        end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

        start_states = ops.einsum(
            "blh,bl->bh", hidden_states, start_log_probs
        )  # get the representation of START as weighted sum of hidden states
        cls_logits = self.answer_class(
            hidden_states, start_states=start_states, cls_index=cls_index
        )  # Shape (batch size,): one single `cls_logits` for each sample

        if not return_dict:
            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
            return outputs + transformer_outputs[1:]
        else:
            return XLNetForQuestionAnsweringOutput(
                start_top_log_probs=start_top_log_probs,
                start_top_index=start_top_index,
                end_top_log_probs=end_top_log_probs,
                end_top_index=end_top_index,
                cls_logits=cls_logits,
                mems=transformer_outputs.mems,
                hidden_states=transformer_outputs.hidden_states,
                attentions=transformer_outputs.attentions,
            )

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput dataclass

Bases: ModelOutput

Output type of [XLNetForQuestionAnswering].

PARAMETER DESCRIPTION
loss

Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.

TYPE: `mindspore.Tensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided DEFAULT: None

start_top_log_probs

Log probabilities for the top config.start_n_top start token possibilities (beam-search).

TYPE: `mindspore.Tensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided DEFAULT: None

start_top_index

Indices for the top config.start_n_top start token possibilities (beam-search).

TYPE: `mindspore.Tensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided DEFAULT: None

end_top_log_probs

Log probabilities for the top config.start_n_top * config.end_n_top end token possibilities (beam-search).

TYPE: `mindspore.Tensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided DEFAULT: None

end_top_index

Indices for the top config.start_n_top * config.end_n_top end token possibilities (beam-search).

TYPE: `mindspore.Tensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided DEFAULT: None

cls_logits

Log probabilities for the is_impossible label of the answers.

TYPE: `mindspore.Tensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided DEFAULT: None

mems

Contains pre-computed hidden-states. Can be used (see mems input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input_ids as they have already been computed.

TYPE: `List[mindspore.Tensor]` of length `config.n_layers` DEFAULT: None

hidden_states

Tuple of mindspore.Tensor (one for the output of the embeddings + one for the output of each layer) of shape (batch_size, sequence_length, hidden_size).

Hidden-states of the model at the output of each layer plus the initial embedding outputs.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True` DEFAULT: None

attentions

Tuple of mindspore.Tensor (one for each layer) of shape (batch_size, num_heads, sequence_length, sequence_length).

Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True` DEFAULT: None

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
@dataclass
class XLNetForQuestionAnsweringOutput(ModelOutput):
    """
    Output type of [`XLNetForQuestionAnswering`].

    Args:
        loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification
            losses.
        start_top_log_probs (`mindspore.Tensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        start_top_index (`mindspore.Tensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Indices for the top config.start_n_top start token possibilities (beam-search).
        end_top_log_probs (`mindspore.Tensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
            (beam-search).
        end_top_index (`mindspore.Tensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
        cls_logits (`mindspore.Tensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the `is_impossible` label of the answers.
        mems (`List[mindspore.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
            token ids which have their past given to this model should not be passed as `input_ids` as they have
            already been computed.
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `mindspore.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `mindspore.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[mindspore.Tensor] = None
    start_top_log_probs: Optional[mindspore.Tensor] = None
    start_top_index: Optional[mindspore.Tensor] = None
    end_top_log_probs: Optional[mindspore.Tensor] = None
    end_top_index: Optional[mindspore.Tensor] = None
    cls_logits: Optional[mindspore.Tensor] = None
    mems: Optional[List[mindspore.Tensor]] = None
    hidden_states: Optional[Tuple[mindspore.Tensor, ...]] = None
    attentions: Optional[Tuple[mindspore.Tensor, ...]] = None

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimple

Bases: XLNetPreTrainedModel

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        mems: Optional[mindspore.Tensor] = None,
        perm_mask: Optional[mindspore.Tensor] = None,
        target_mapping: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        input_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        start_positions: Optional[mindspore.Tensor] = None,
        end_positions: Optional[mindspore.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete when `use_cache` is removed in XLNetModel
    ) -> Union[Tuple, XLNetForQuestionAnsweringSimpleOutput]:
        r"""
        start_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = ops.split(logits, 1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.shape) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.shape) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.shape[1]
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return XLNetForQuestionAnsweringSimpleOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            mems=outputs.mems,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimple.forward(input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs)

start_positions (mindspore.Tensor of shape (batch_size,), optional): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (sequence_length). Positions outside of the sequence are not taken into account for computing the loss.

end_positions (mindspore.Tensor of shape (batch_size,), optional): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (sequence_length). Positions outside of the sequence are not taken into account for computing the loss.
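
A minimal usage sketch (hedged): it assumes the `xlnet/xlnet-base-cased` checkpoint used elsewhere on this page, that the tokenizer accepts `return_tensors="ms"`, and the start/end indices are illustrative only.

>>> from mindnlp.transformers import AutoTokenizer, XLNetForQuestionAnsweringSimple
>>> import mindspore

>>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> model = XLNetForQuestionAnsweringSimple.from_pretrained("xlnet/xlnet-base-cased")

>>> inputs = tokenizer("Who was Jim Henson?", "Jim Henson was a nice puppet", return_tensors="ms")
>>> outputs = model(**inputs, start_positions=mindspore.tensor([7]), end_positions=mindspore.tensor([12]))
>>> loss = outputs.loss                   # averaged start/end cross-entropy
>>> start_logits = outputs.start_logits   # (batch_size, sequence_length)
>>> end_logits = outputs.end_logits       # (batch_size, sequence_length)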

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    mems: Optional[mindspore.Tensor] = None,
    perm_mask: Optional[mindspore.Tensor] = None,
    target_mapping: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    input_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    start_positions: Optional[mindspore.Tensor] = None,
    end_positions: Optional[mindspore.Tensor] = None,
    use_mems: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    **kwargs,  # delete when `use_cache` is removed in XLNetModel
) -> Union[Tuple, XLNetForQuestionAnsweringSimpleOutput]:
    r"""
    start_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for position (index) of the start of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
        are not taken into account for computing the loss.
    end_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for position (index) of the end of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
        are not taken into account for computing the loss.
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    outputs = self.transformer(
        input_ids,
        attention_mask=attention_mask,
        mems=mems,
        perm_mask=perm_mask,
        target_mapping=target_mapping,
        token_type_ids=token_type_ids,
        input_mask=input_mask,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        use_mems=use_mems,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        **kwargs,
    )

    sequence_output = outputs[0]

    logits = self.qa_outputs(sequence_output)
    start_logits, end_logits = ops.split(logits, 1, dim=-1)
    start_logits = start_logits.squeeze(-1)
    end_logits = end_logits.squeeze(-1)

    total_loss = None
    if start_positions is not None and end_positions is not None:
        # If we are on multi-GPU, split add a dimension
        if len(start_positions.shape) > 1:
            start_positions = start_positions.squeeze(-1)
        if len(end_positions.shape) > 1:
            end_positions = end_positions.squeeze(-1)
        # sometimes the start/end positions are outside our model inputs, we ignore these terms
        ignored_index = start_logits.shape[1]
        start_positions = start_positions.clamp(0, ignored_index)
        end_positions = end_positions.clamp(0, ignored_index)

        loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
        start_loss = loss_fct(start_logits, start_positions)
        end_loss = loss_fct(end_logits, end_positions)
        total_loss = (start_loss + end_loss) / 2

    if not return_dict:
        output = (start_logits, end_logits) + outputs[1:]
        return ((total_loss,) + output) if total_loss is not None else output

    return XLNetForQuestionAnsweringSimpleOutput(
        loss=total_loss,
        start_logits=start_logits,
        end_logits=end_logits,
        mems=outputs.mems,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput dataclass

Bases: ModelOutput

Output type of [XLNetForQuestionAnsweringSimple].

PARAMETER DESCRIPTION
loss

Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.

TYPE: `mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided DEFAULT: None

start_logits

Span-start scores (before SoftMax).

TYPE: `mindspore.Tensor` of shape `(batch_size, sequence_length,)` DEFAULT: None

end_logits

Span-end scores (before SoftMax).

TYPE: `mindspore.Tensor` of shape `(batch_size, sequence_length,)` DEFAULT: None

mems

Contains pre-computed hidden-states. Can be used (see mems input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input_ids as they have already been computed.

TYPE: `List[mindspore.Tensor]` of length `config.n_layers` DEFAULT: None

hidden_states

Tuple of mindspore.Tensor (one for the output of the embeddings + one for the output of each layer) of shape (batch_size, sequence_length, hidden_size).

Hidden-states of the model at the output of each layer plus the initial embedding outputs.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True` DEFAULT: None

attentions

Tuple of mindspore.Tensor (one for each layer) of shape (batch_size, num_heads, sequence_length, sequence_length).

Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True` DEFAULT: None

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
@dataclass
class XLNetForQuestionAnsweringSimpleOutput(ModelOutput):
    """
    Output type of [`XLNetForQuestionAnsweringSimple`].

    Args:
        loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (`mindspore.Tensor` of shape `(batch_size, sequence_length,)`):
            Span-start scores (before SoftMax).
        end_logits (`mindspore.Tensor` of shape `(batch_size, sequence_length,)`):
            Span-end scores (before SoftMax).
        mems (`List[mindspore.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
            token ids which have their past given to this model should not be passed as `input_ids` as they have
            already been computed.
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `mindspore.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `mindspore.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[mindspore.Tensor] = None
    start_logits: mindspore.Tensor = None
    end_logits: mindspore.Tensor = None
    mems: Optional[List[mindspore.Tensor]] = None
    hidden_states: Optional[Tuple[mindspore.Tensor, ...]] = None
    attentions: Optional[Tuple[mindspore.Tensor, ...]] = None

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassification

Bases: XLNetPreTrainedModel

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
class XLNetForSequenceClassification(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        mems: Optional[mindspore.Tensor] = None,
        perm_mask: Optional[mindspore.Tensor] = None,
        target_mapping: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        input_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete when `use_cache` is removed in XLNetModel
    ) -> Union[Tuple, XLNetForSequenceClassificationOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )
        output = transformer_outputs[0]

        output = self.sequence_summary(output)
        logits = self.logits_proj(output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (mindspore.int64, mindspore.int32):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return XLNetForSequenceClassificationOutput(
            loss=loss,
            logits=logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassification.forward(input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, labels=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs)

labels (mindspore.Tensor of shape (batch_size,), optional): Labels for computing the sequence classification/regression loss. Indices should be in [0, ..., config.num_labels - 1]. If config.num_labels == 1 a regression loss is computed (Mean-Square loss). If config.num_labels > 1 a classification loss is computed (Cross-Entropy).

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    mems: Optional[mindspore.Tensor] = None,
    perm_mask: Optional[mindspore.Tensor] = None,
    target_mapping: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    input_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    use_mems: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    **kwargs,  # delete when `use_cache` is removed in XLNetModel
) -> Union[Tuple, XLNetForSequenceClassificationOutput]:
    r"""
    labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
        Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
        config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
        `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    transformer_outputs = self.transformer(
        input_ids,
        attention_mask=attention_mask,
        mems=mems,
        perm_mask=perm_mask,
        target_mapping=target_mapping,
        token_type_ids=token_type_ids,
        input_mask=input_mask,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        use_mems=use_mems,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        **kwargs,
    )
    output = transformer_outputs[0]

    output = self.sequence_summary(output)
    logits = self.logits_proj(output)

    loss = None
    if labels is not None:
        if self.config.problem_type is None:
            if self.num_labels == 1:
                self.config.problem_type = "regression"
            elif self.num_labels > 1 and labels.dtype in (mindspore.int64, mindspore.int32):
                self.config.problem_type = "single_label_classification"
            else:
                self.config.problem_type = "multi_label_classification"

        if self.config.problem_type == "regression":
            loss_fct = MSELoss()
            if self.num_labels == 1:
                loss = loss_fct(logits.squeeze(), labels.squeeze())
            else:
                loss = loss_fct(logits, labels)
        elif self.config.problem_type == "single_label_classification":
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        elif self.config.problem_type == "multi_label_classification":
            loss_fct = BCEWithLogitsLoss()
            loss = loss_fct(logits, labels)

    if not return_dict:
        output = (logits,) + transformer_outputs[1:]
        return ((loss,) + output) if loss is not None else output

    return XLNetForSequenceClassificationOutput(
        loss=loss,
        logits=logits,
        mems=transformer_outputs.mems,
        hidden_states=transformer_outputs.hidden_states,
        attentions=transformer_outputs.attentions,
    )
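
A minimal usage sketch for sequence classification follows. The checkpoint name, `num_labels`, and `return_tensors="ms"` are illustrative assumptions rather than values taken from the listing above:

>>> from mindnlp.transformers import AutoTokenizer, XLNetForSequenceClassification
>>> import mindspore

>>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> model = XLNetForSequenceClassification.from_pretrained("xlnet/xlnet-base-cased", num_labels=2)

>>> # Encode one sentence and attach a single class label
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="ms")
>>> labels = mindspore.tensor([1])

>>> outputs = model(**inputs, labels=labels)
>>> outputs.loss, outputs.logits.shape  # logits: (batch_size, num_labels)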

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput dataclass

Bases: ModelOutput

Output type of [XLNetForSequenceClassification].

PARAMETER DESCRIPTION
loss

Classification (or regression if config.num_labels==1) loss.

TYPE: `mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided DEFAULT: None

logits

Classification (or regression if config.num_labels==1) scores (before SoftMax).

TYPE: `mindspore.Tensor` of shape `(batch_size, config.num_labels)` DEFAULT: None

mems

Contains pre-computed hidden-states. Can be used (see mems input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input_ids as they have already been computed.

TYPE: `List[mindspore.Tensor]` of length `config.n_layers` DEFAULT: None

hidden_states

Tuple of mindspore.Tensor (one for the output of the embeddings + one for the output of each layer) of shape (batch_size, sequence_length, hidden_size).

Hidden-states of the model at the output of each layer plus the initial embedding outputs.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True` DEFAULT: None

attentions

Tuple of mindspore.Tensor (one for each layer) of shape (batch_size, num_heads, sequence_length, sequence_length).

Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True` DEFAULT: None

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
@dataclass
class XLNetForSequenceClassificationOutput(ModelOutput):
    """
    Output type of [`XLNetForSequenceClassification`].

    Args:
        loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`mindspore.Tensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        mems (`List[mindspore.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
            token ids which have their past given to this model should not be passed as `input_ids` as they have
            already been computed.
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `mindspore.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `mindspore.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[mindspore.Tensor] = None
    logits: mindspore.Tensor = None
    mems: Optional[List[mindspore.Tensor]] = None
    hidden_states: Optional[Tuple[mindspore.Tensor, ...]] = None
    attentions: Optional[Tuple[mindspore.Tensor, ...]] = None
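
Continuing the classification sketch above, the two return formats line up as follows (the tuple order is assumed to follow the fields listed here, with optional entries present only when requested):

>>> out = model(**inputs, labels=labels, return_dict=True)
>>> out.loss, out.logits, out.mems   # attribute access on the dataclass
>>> tup = model(**inputs, labels=labels, return_dict=False)
>>> loss, logits = tup[0], tup[1]    # plain tuple; remaining entries follow the field order above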

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassification

Bases: XLNetPreTrainedModel

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
class XLNetForTokenClassification(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        mems: Optional[mindspore.Tensor] = None,
        perm_mask: Optional[mindspore.Tensor] = None,
        target_mapping: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        input_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete when `use_cache` is removed in XLNetModel
    ) -> Union[Tuple, XLNetForTokenClassificationOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return XLNetForTokenClassificationOutput(
            loss=loss,
            logits=logits,
            mems=outputs.mems,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassification.forward(input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, labels=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs)

labels (mindspore.Tensor of shape (batch_size, sequence_length), optional): Labels for computing the token classification loss. Indices should be in [0, ..., config.num_labels - 1].

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    mems: Optional[mindspore.Tensor] = None,
    perm_mask: Optional[mindspore.Tensor] = None,
    target_mapping: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    input_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    use_mems: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    **kwargs,  # delete when `use_cache` is removed in XLNetModel
) -> Union[Tuple, XLNetForTokenClassificationOutput]:
    r"""
    labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Labels for computing the token classification loss. Indices should be in
        `[0, ..., config.num_labels - 1]`.
    """
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    outputs = self.transformer(
        input_ids,
        attention_mask=attention_mask,
        mems=mems,
        perm_mask=perm_mask,
        target_mapping=target_mapping,
        token_type_ids=token_type_ids,
        input_mask=input_mask,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        use_mems=use_mems,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    sequence_output = outputs[0]

    logits = self.classifier(sequence_output)

    loss = None
    if labels is not None:
        loss_fct = CrossEntropyLoss()
        loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

    if not return_dict:
        output = (logits,) + outputs[1:]
        return ((loss,) + output) if loss is not None else output

    return XLNetForTokenClassificationOutput(
        loss=loss,
        logits=logits,
        mems=outputs.mems,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )
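
A minimal usage sketch for token classification. As above, the checkpoint name, `num_labels`, and `return_tensors="ms"` are illustrative assumptions:

>>> from mindnlp.transformers import AutoTokenizer, XLNetForTokenClassification

>>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> model = XLNetForTokenClassification.from_pretrained("xlnet/xlnet-base-cased", num_labels=5)

>>> inputs = tokenizer("MindSpore is developed by Huawei", return_tensors="ms")
>>> outputs = model(**inputs)
>>> outputs.logits.shape       # (batch_size, sequence_length, num_labels)
>>> outputs.logits.argmax(-1)  # per-token class predictions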

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput dataclass

Bases: ModelOutput

Output type of [XLNetForTokenClassification].

PARAMETER DESCRIPTION
loss

Classification loss.

TYPE: `mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided DEFAULT: None

logits

Classification scores (before SoftMax).

TYPE: `mindspore.Tensor` of shape `(batch_size, sequence_length, config.num_labels)` DEFAULT: None

mems

Contains pre-computed hidden-states. Can be used (see mems input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input_ids as they have already been computed.

TYPE: `List[mindspore.Tensor]` of length `config.n_layers` DEFAULT: None

hidden_states

Tuple of mindspore.Tensor (one for the output of the embeddings + one for the output of each layer) of shape (batch_size, sequence_length, hidden_size).

Hidden-states of the model at the output of each layer plus the initial embedding outputs.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True` DEFAULT: None

attentions

Tuple of mindspore.Tensor (one for each layer) of shape (batch_size, num_heads, sequence_length, sequence_length).

Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True` DEFAULT: None

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
@dataclass
class XLNetForTokenClassificationOutput(ModelOutput):
    """
    Output type of [`XLNetForTokenClassification`].

    Args:
        loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification loss.
        logits (`mindspore.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        mems (`List[mindspore.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
            token ids which have their past given to this model should not be passed as `input_ids` as they have
            already been computed.
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `mindspore.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `mindspore.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[mindspore.Tensor] = None
    logits: mindspore.Tensor = None
    mems: Optional[List[mindspore.Tensor]] = None
    hidden_states: Optional[Tuple[mindspore.Tensor, ...]] = None
    attentions: Optional[Tuple[mindspore.Tensor, ...]] = None

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModel

Bases: XLNetPreTrainedModel

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
class XLNetLMHeadModel(XLNetPreTrainedModel):
    _tied_weights_keys = ["lm_loss.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length

        self.transformer = XLNetModel(config)
        self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_loss

    def set_output_embeddings(self, new_embeddings):
        self.lm_loss = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_mems=None, **kwargs):
        # Add dummy token at the end (no attention on this one)

        effective_batch_size = input_ids.shape[0]
        dummy_token = ops.zeros((effective_batch_size, 1), dtype=mindspore.int64)

        # At every pass, the attention values for the new token and the two last generated tokens
        # are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would have
        # offset = 1; offset = 2 seems to have slightly better computation.
        offset = 2

        if past_key_values:
            input_ids = ops.cat([input_ids[:, -offset:], dummy_token], dim=1)
        else:
            input_ids = ops.cat([input_ids, dummy_token], dim=1)

        # Build permutation mask so that previous tokens don't see last token
        sequence_length = input_ids.shape[1]
        perm_mask = ops.zeros(
            (effective_batch_size, sequence_length, sequence_length), dtype=mindspore.float32)
        perm_mask[:, :, -1] = 1.0

        # We'll only predict the last token
        target_mapping = ops.zeros(
            (effective_batch_size, 1, sequence_length), dtype=mindspore.float32)
        target_mapping[:, 0, -1] = 1.0

        inputs = {
            "input_ids": input_ids,
            "perm_mask": perm_mask,
            "target_mapping": target_mapping,
            "use_mems": use_mems,
        }

        # if past is defined in model kwargs then use it for faster decoding
        if past_key_values:
            inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values)

        return inputs

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        mems: Optional[mindspore.Tensor] = None,
        perm_mask: Optional[mindspore.Tensor] = None,
        target_mapping: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        input_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        labels: Optional[mindspore.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete when `use_cache` is removed in XLNetModel
    ) -> Union[Tuple, XLNetLMHeadModelOutput]:
        r"""
        labels (`mindspore.Tensor` of shape `(batch_size, num_predict)`, *optional*):
            Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If
            `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.

            The labels should correspond to the masked input words that should be predicted and depend on
            `target_mapping`. Note in order to perform standard auto-regressive language modeling a *<mask>* token has
            to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below)

            Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored, the loss
            is only computed for labels in `[0, ..., config.vocab_size]`

        Return:

        Examples:

        ```python
        >>> from mindnlp.transformers import AutoTokenizer, XLNetLMHeadModel
        >>> import mindspore
        >>> from mindspore import ops

        >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
        >>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")

        >>> # We show how to setup inputs to predict a next token using a bi-directional context.
        >>> input_ids = mindspore.tensor(
        ...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
        ... ).unsqueeze(
        ...     0
        ... )  # We will predict the masked token
        >>> perm_mask = ops.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=mindspore.float32)
        >>> perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
        >>> target_mapping = ops.zeros(
        ...     (1, 1, input_ids.shape[1]), dtype=mindspore.float32
        ... )  # Shape [1, 1, seq_length] => let's predict one token
        >>> target_mapping[
        ...     0, 0, -1
        ... ] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

        >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
        >>> next_token_logits = outputs[
        ...     0
        ... ]  # Output has shape [target_mapping.shape[0], target_mapping.shape[1], config.vocab_size]

        >>> # In the same way, XLNetLMHeadModel can be trained with standard auto-regressive language modeling.
        >>> input_ids = mindspore.tensor(
        ...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
        ... ).unsqueeze(
        ...     0
        ... )  # We will predict the masked token
        >>> labels = mindspore.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
        >>> assert labels.shape[0] == 1, "only one word will be predicted"
        >>> perm_mask = ops.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=mindspore.float32)
        >>> perm_mask[
        ...     :, :, -1
        ... ] = 1.0  # Previous tokens don't see last token as is done in standard auto-regressive lm training
        >>> target_mapping = ops.zeros(
        ...     (1, 1, input_ids.shape[1]), dtype=mindspore.float32
        ... )  # Shape [1, 1, seq_length] => let's predict one token
        >>> target_mapping[
        ...     0, 0, -1
        ... ] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

        >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
        >>> loss = outputs.loss
        >>> next_token_logits = (
        ...     outputs.logits
        ... )  # Logits have shape [target_mapping.shape[0], target_mapping.shape[1], config.vocab_size]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            mems=mems,
            perm_mask=perm_mask,
            target_mapping=target_mapping,
            token_type_ids=token_type_ids,
            input_mask=input_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_mems=use_mems,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )

        logits = self.lm_loss(transformer_outputs[0])

        loss = None
        if labels is not None:
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, logits.shape[-1]), labels.view(-1))

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return XLNetLMHeadModelOutput(
            loss=loss,
            logits=logits,
            mems=transformer_outputs.mems,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(mems: List[mindspore.Tensor], beam_idx: mindspore.Tensor) -> List[mindspore.Tensor]:
        """
        This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
        generation step.
        """
        return [layer_past.index_select(1, beam_idx) for layer_past in mems]
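
`prepare_inputs_for_generation` and `_reorder_cache` above are the hooks used during text generation; beam search in particular relies on `_reorder_cache` to keep `mems` aligned with the selected beams. A minimal generation sketch, where the checkpoint name and generation arguments are illustrative and `generate` support is assumed from those hooks rather than shown in the listing:

>>> from mindnlp.transformers import AutoTokenizer, XLNetLMHeadModel

>>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
>>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")

>>> input_ids = tokenizer("Hello, my dog is", return_tensors="ms").input_ids
>>> generated = model.generate(input_ids, max_new_tokens=5, num_beams=2)  # beam search exercises _reorder_cache
>>> tokenizer.decode(generated[0])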

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModel.forward(input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, labels=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs)

labels (mindspore.Tensor of shape (batch_size, num_predict), optional): Labels for masked language modeling. num_predict corresponds to target_mapping.shape[1]. If target_mapping is None, then num_predict corresponds to sequence_length.

The labels should correspond to the masked input words that should be predicted and depend on
`target_mapping`. Note in order to perform standard auto-regressive language modeling a *<mask>* token has
to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below)

Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored, the loss
is only computed for labels in `[0, ..., config.vocab_size]`

Return:

Examples:

>>> from mindnlp.transformers import AutoTokenizer, XLNetLMHeadModel
>>> import mindspore
>>> from mindspore import ops

>>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
>>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")

>>> # We show how to setup inputs to predict a next token using a bi-directional context.
>>> input_ids = mindspore.tensor(
...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
... ).unsqueeze(
...     0
... )  # We will predict the masked token
>>> perm_mask = ops.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=mindspore.float32)
>>> perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
>>> target_mapping = ops.zeros(
...     (1, 1, input_ids.shape[1]), dtype=mindspore.float32
... )  # Shape [1, 1, seq_length] => let's predict one token
>>> target_mapping[
...     0, 0, -1
... ] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

>>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
>>> next_token_logits = outputs[
...     0
... ]  # Output has shape [target_mapping.shape[0], target_mapping.shape[1], config.vocab_size]

>>> # In the same way, XLNetLMHeadModel can be trained with standard auto-regressive language modeling.
>>> input_ids = mindspore.tensor(
...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
... ).unsqueeze(
...     0
... )  # We will predict the masked token
>>> labels = mindspore.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
>>> assert labels.shape[0] == 1, "only one word will be predicted"
>>> perm_mask = ops.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=mindspore.float32)
>>> perm_mask[
...     :, :, -1
... ] = 1.0  # Previous tokens don't see last token as is done in standard auto-regressive lm training
>>> target_mapping = ops.zeros(
...     (1, 1, input_ids.shape[1]), dtype=mindspore.float32
... )  # Shape [1, 1, seq_length] => let's predict one token
>>> target_mapping[
...     0, 0, -1
... ] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

>>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
>>> loss = outputs.loss
>>> next_token_logits = (
...     outputs.logits
... )  # Logits have shape [target_mapping.shape[0], target_mapping.shape[1], config.vocab_size]
Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
def forward(
    self,
    input_ids: Optional[mindspore.Tensor] = None,
    attention_mask: Optional[mindspore.Tensor] = None,
    mems: Optional[mindspore.Tensor] = None,
    perm_mask: Optional[mindspore.Tensor] = None,
    target_mapping: Optional[mindspore.Tensor] = None,
    token_type_ids: Optional[mindspore.Tensor] = None,
    input_mask: Optional[mindspore.Tensor] = None,
    head_mask: Optional[mindspore.Tensor] = None,
    inputs_embeds: Optional[mindspore.Tensor] = None,
    labels: Optional[mindspore.Tensor] = None,
    use_mems: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    **kwargs,  # delete when `use_cache` is removed in XLNetModel
) -> Union[Tuple, XLNetLMHeadModelOutput]:
    r"""
    labels (`mindspore.Tensor` of shape `(batch_size, num_predict)`, *optional*):
        Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If
        `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.

        The labels should correspond to the masked input words that should be predicted and depend on
        `target_mapping`. Note in order to perform standard auto-regressive language modeling a *<mask>* token has
        to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below)

        Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored, the loss
        is only computed for labels in `[0, ..., config.vocab_size]`

    Return:

    Examples:

    ```python
    >>> from mindnlp.transformers import AutoTokenizer, XLNetLMHeadModel
    >>> import mindspore
    >>> from mindspore import ops

    >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
    >>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")

    >>> # We show how to setup inputs to predict a next token using a bi-directional context.
    >>> input_ids = mindspore.tensor(
    ...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
    ... ).unsqueeze(
    ...     0
    ... )  # We will predict the masked token
    >>> perm_mask = ops.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=mindspore.float32)
    >>> perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
    >>> target_mapping = ops.zeros(
    ...     (1, 1, input_ids.shape[1]), dtype=mindspore.float32
    ... )  # Shape [1, 1, seq_length] => let's predict one token
    >>> target_mapping[
    ...     0, 0, -1
    ... ] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

    >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
    >>> next_token_logits = outputs[
    ...     0
    ... ]  # Output has shape [target_mapping.shape[0], target_mapping.shape[1], config.vocab_size]

    >>> # In the same way, XLNetLMHeadModel can be trained with standard auto-regressive language modeling.
    >>> input_ids = mindspore.tensor(
    ...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
    ... ).unsqueeze(
    ...     0
    ... )  # We will predict the masked token
    >>> labels = mindspore.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
    >>> assert labels.shape[0] == 1, "only one word will be predicted"
    >>> perm_mask = ops.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=mindspore.float32)
    >>> perm_mask[
    ...     :, :, -1
    ... ] = 1.0  # Previous tokens don't see last token as is done in standard auto-regressive lm training
    >>> target_mapping = ops.zeros(
    ...     (1, 1, input_ids.shape[1]), dtype=mindspore.float32
    ... )  # Shape [1, 1, seq_length] => let's predict one token
    >>> target_mapping[
    ...     0, 0, -1
    ... ] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

    >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
    >>> loss = outputs.loss
    >>> next_token_logits = (
    ...     outputs.logits
    ... )  # Logits have shape [target_mapping.shape[0], target_mapping.shape[1], config.vocab_size]
    ```"""
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    transformer_outputs = self.transformer(
        input_ids,
        attention_mask=attention_mask,
        mems=mems,
        perm_mask=perm_mask,
        target_mapping=target_mapping,
        token_type_ids=token_type_ids,
        input_mask=input_mask,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        use_mems=use_mems,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        **kwargs,
    )

    logits = self.lm_loss(transformer_outputs[0])

    loss = None
    if labels is not None:
        # Flatten the tokens
        loss_fct = CrossEntropyLoss()
        loss = loss_fct(logits.view(-1, logits.shape[-1]), labels.view(-1))

    if not return_dict:
        output = (logits,) + transformer_outputs[1:]
        return ((loss,) + output) if loss is not None else output

    return XLNetLMHeadModelOutput(
        loss=loss,
        logits=logits,
        mems=transformer_outputs.mems,
        hidden_states=transformer_outputs.hidden_states,
        attentions=transformer_outputs.attentions,
    )

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput dataclass

Bases: ModelOutput

Output type of [XLNetLMHeadModel].

PARAMETER DESCRIPTION
loss

Language modeling loss (for next-token prediction).

TYPE: `mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided DEFAULT: None

logits

Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).

num_predict corresponds to target_mapping.shape[1]. If target_mapping is None, then num_predict corresponds to sequence_length.

TYPE: `mindspore.Tensor` of shape `(batch_size, num_predict, config.vocab_size)` DEFAULT: None

mems

Contains pre-computed hidden-states. Can be used (see mems input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input_ids as they have already been computed.

TYPE: `List[mindspore.Tensor]` of length `config.n_layers` DEFAULT: None

hidden_states

Tuple of mindspore.Tensor (one for the output of the embeddings + one for the output of each layer) of shape (batch_size, sequence_length, hidden_size).

Hidden-states of the model at the output of each layer plus the initial embedding outputs.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True` DEFAULT: None

attentions

Tuple of mindspore.Tensor (one for each layer) of shape (batch_size, num_heads, sequence_length, sequence_length).

Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True` DEFAULT: None

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
@dataclass
class XLNetLMHeadModelOutput(ModelOutput):
    """
    Output type of [`XLNetLMHeadModel`].

    Args:
        loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`mindspore.Tensor` of shape `(batch_size, num_predict, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).

            `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
            corresponds to `sequence_length`.
        mems (`List[mindspore.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
            token ids which have their past given to this model should not be passed as `input_ids` as they have
            already been computed.
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `mindspore.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `mindspore.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[mindspore.Tensor] = None
    logits: mindspore.Tensor = None
    mems: Optional[List[mindspore.Tensor]] = None
    hidden_states: Optional[Tuple[mindspore.Tensor, ...]] = None
    attentions: Optional[Tuple[mindspore.Tensor, ...]] = None

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetModel

Bases: XLNetPreTrainedModel

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
class XLNetModel(XLNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.mem_len = config.mem_len
        self.reuse_len = config.reuse_len
        self.d_model = config.d_model
        self.same_length = config.same_length
        self.attn_type = config.attn_type
        self.bi_data = config.bi_data
        self.clamp_len = config.clamp_len
        self.n_layer = config.n_layer

        self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
        self.mask_emb = nn.Parameter(ops.randn(1, 1, config.d_model))
        self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
        self.dropout = nn.Dropout(config.dropout)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.word_embedding

    def set_input_embeddings(self, new_embeddings):
        self.word_embedding = new_embeddings

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    def create_mask(self, qlen, mlen):
        """
        Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.

        Args:
            qlen: Sequence length
            mlen: Mask length

        ::

                  same_length=False:   same_length=True:
                  <mlen > <  qlen >    <mlen > <  qlen >
               ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]
                 [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]
            qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]
                 [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]
               v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]

        """
        mask = ops.ones((qlen, qlen + mlen))
        if self.same_length:
            mask_lo = mask[:, :qlen].tril(-1)
            mask.triu_(mlen + 1)
            mask[:, :qlen] += mask_lo
        else:
            mask.triu_(mlen + 1)

        return mask

    def cache_mem(self, curr_out, prev_mem):
        # cache hidden states into memory.
        if self.reuse_len is not None and self.reuse_len > 0:
            curr_out = curr_out[: self.reuse_len]

        if self.mem_len is None or self.mem_len == 0:
            # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time
            # and returns all of the past and current hidden states.
            cutoff = 0
        else:
            # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden
            # states. This is the preferred setting for training and long-form generation.
            cutoff = -self.mem_len
        if prev_mem is None:
            # if `use_mems` is active and `mem_len` is defined, the model
            new_mem = curr_out[cutoff:]
        else:
            new_mem = ops.cat([prev_mem, curr_out], dim=0)[cutoff:]

        return ops.stop_gradient(new_mem)

    @staticmethod
    def positional_embedding(pos_seq, inv_freq, bsz=None):
        sinusoid_inp = ops.einsum("i,d->id", pos_seq, inv_freq)
        pos_emb = ops.cat([ops.sin(sinusoid_inp), ops.cos(sinusoid_inp)], dim=-1)
        pos_emb = pos_emb[:, None, :]

        if bsz is not None:
            pos_emb = pos_emb.broadcast_to((-1, bsz, -1))

        return pos_emb

    def relative_positional_encoding(self, qlen, klen, bsz=None):
        # create relative positional encoding.
        freq_seq = ops.arange(0, self.d_model, 2.0, dtype=mindspore.int64).float()
        inv_freq = 1 / ops.pow(10000, (freq_seq / self.d_model))

        if self.attn_type == "bi":
            # beg, end = klen - 1, -qlen
            beg, end = klen, -qlen
        elif self.attn_type == "uni":
            # beg, end = klen - 1, -1
            beg, end = klen, -1
        else:
            raise ValueError(f"Unknown `attn_type` {self.attn_type}.")

        if self.bi_data:
            fwd_pos_seq = ops.arange(beg, end, -1.0, dtype=mindspore.int64).float()
            bwd_pos_seq = ops.arange(-beg, -end, 1.0, dtype=mindspore.int64).float()

            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
                bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)

            if bsz is not None:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
            else:
                fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
                bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)

            pos_emb = ops.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = ops.arange(beg, end, -1.0, dtype=mindspore.int64).float()
            if self.clamp_len > 0:
                fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
            pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)

        return pos_emb

    def forward(
        self,
        input_ids: Optional[mindspore.Tensor] = None,
        attention_mask: Optional[mindspore.Tensor] = None,
        mems: Optional[mindspore.Tensor] = None,
        perm_mask: Optional[mindspore.Tensor] = None,
        target_mapping: Optional[mindspore.Tensor] = None,
        token_type_ids: Optional[mindspore.Tensor] = None,
        input_mask: Optional[mindspore.Tensor] = None,
        head_mask: Optional[mindspore.Tensor] = None,
        inputs_embeds: Optional[mindspore.Tensor] = None,
        use_mems: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,  # delete after depreciation warning is removed
    ) -> Union[Tuple, XLNetModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems`"
                " instead.",
                FutureWarning,
            )
            use_mems = kwargs["use_cache"]

        if self.training:
            use_mems = use_mems if use_mems is not None else self.config.use_mems_train
        else:
            use_mems = use_mems if use_mems is not None else self.config.use_mems_eval

        # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
        # but we want a unified interface in the library with the batch size on the first dimension
        # so we move here the first dimension (batch) to the end
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_ids = ops.transpose(input_ids, 0, 1)
            qlen, bsz = input_ids.shape[0], input_ids.shape[1]
        elif inputs_embeds is not None:
            inputs_embeds = ops.transpose(inputs_embeds, 0, 1)
            qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        token_type_ids = ops.transpose(token_type_ids, 0, 1) if token_type_ids is not None else None
        input_mask = ops.transpose(input_mask, 0, 1) if input_mask is not None else None
        attention_mask = ops.transpose(attention_mask, 0, 1) if attention_mask is not None else None
        perm_mask = perm_mask.permute(1, 2, 0) if perm_mask is not None else None
        target_mapping = target_mapping.permute(1, 2, 0) if target_mapping is not None else None

        mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
        klen = mlen + qlen

        dtype_float = self.dtype

        # Attention mask
        # causal attention mask
        if self.attn_type == "uni":
            attn_mask = self.create_mask(qlen, mlen)
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == "bi":
            attn_mask = None
        else:
            raise ValueError(f"Unsupported attention type: {self.attn_type}")

        # data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
        )
        if input_mask is None and attention_mask is not None:
            input_mask = 1.0 - attention_mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask
        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            if mlen > 0:
                mems_mask = ops.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask.dtype)
                data_mask = ops.cat([mems_mask, data_mask], dim=1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(dtype_float)

        if attn_mask is not None:
            non_tgt_mask = -ops.eye(qlen).to(attn_mask.dtype)
            if mlen > 0:
                non_tgt_mask = ops.cat([ops.zeros([qlen, mlen]).to(attn_mask.dtype), non_tgt_mask], dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask.dtype)
        else:
            non_tgt_mask = None

        # Word embeddings and prepare h & g hidden states
        if inputs_embeds is not None:
            word_emb_k = inputs_embeds
        else:
            word_emb_k = self.word_embedding(input_ids)
        output_h = self.dropout(word_emb_k)
        if target_mapping is not None:
            word_emb_q = self.mask_emb.broadcast_to((target_mapping.shape[0], bsz, -1))
            # else:  # We removed the inp_q input which was same as target mapping
            #     inp_q_ext = inp_q[:, :, None]
            #     word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
            output_g = self.dropout(word_emb_q)
        else:
            output_g = None

        # Segment embedding
        if token_type_ids is not None:
            # Convert `token_type_ids` to one-hot `seg_mat`
            if mlen > 0:
                mem_pad = ops.zeros([mlen, bsz], dtype=mindspore.int64)
                cat_ids = ops.cat([mem_pad, token_type_ids], dim=0)
            else:
                cat_ids = token_type_ids

            # `1` indicates not in the same segment [qlen x klen x bsz]
            seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
            seg_mat = nn.functional.one_hot(seg_mat, num_classes=2).to(dtype_float)
        else:
            seg_mat = None

        # Positional encoding
        pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
        pos_emb = self.dropout(pos_emb)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
        # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
                head_mask = head_mask.broadcast_to((self.n_layer, -1, -1, -1, -1))
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to float if need + fp16 compatibility
        else:
            head_mask = [None] * self.n_layer

        new_mems = ()
        if mems is None:
            mems = [None] * len(self.layer)

        attentions = [] if output_attentions else None
        hidden_states = [] if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            if use_mems:
                # cache new mems
                new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
            if output_hidden_states:
                hidden_states.append((output_h, output_g) if output_g is not None else output_h)

            outputs = layer_module(
                output_h,
                output_g,
                attn_mask_h=non_tgt_mask,
                attn_mask_g=attn_mask,
                r=pos_emb,
                seg_mat=seg_mat,
                mems=mems[i],
                target_mapping=target_mapping,
                head_mask=head_mask[i],
                output_attentions=output_attentions,
            )
            output_h, output_g = outputs[:2]
            if output_attentions:
                attentions.append(outputs[2])

        # Add last hidden state
        if output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        output = self.dropout(output_g if output_g is not None else output_h)

        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        output = output.permute(1, 0, 2)

        if not use_mems:
            new_mems = None

        if output_hidden_states:
            if output_g is not None:
                hidden_states = tuple(h.permute(1, 0, 2) for hs in hidden_states for h in hs)
            else:
                hidden_states = tuple(hs.permute(1, 0, 2) for hs in hidden_states)

        if output_attentions:
            if target_mapping is not None:
                # when target_mapping is provided, there are 2-tuple of attentions
                attentions = tuple(
                    tuple(att_stream.permute(2, 3, 0, 1) for att_stream in t) for t in attentions
                )
            else:
                attentions = tuple(t.permute(2, 3, 0, 1) for t in attentions)

        if not return_dict:
            return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None)

        return XLNetModelOutput(
            last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions
        )
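
A minimal usage sketch (not part of the source listing above): it assumes the public `xlnet-base-cased` checkpoint, the usual `from_pretrained` workflow, and that `return_tensors="ms"` yields MindSpore tensors.

```python
from mindnlp.transformers import XLNetModel, XLNetTokenizer

# Assumed checkpoint name; any compatible XLNet checkpoint works the same way.
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
model = XLNetModel.from_pretrained("xlnet-base-cased")

# Inputs are batch-first; the model transposes to [len, bsz] internally (see above).
inputs = tokenizer("Hello world", return_tensors="ms")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```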

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetModel.create_mask(qlen, mlen)

Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.

PARAMETER DESCRIPTION
qlen

Sequence length

mlen

Mask length

::

      same_length=False:      same_length=True:
      <mlen > <  qlen >       <mlen > <  qlen >
   ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
     [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
     [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
   v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]
Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
def create_mask(self, qlen, mlen):
    """
    Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.

    Args:
        qlen: Sequence length
        mlen: Mask length

    ::

              same_length=False:      same_length=True:
              <mlen > <  qlen >       <mlen > <  qlen >
           ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
             [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
        qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
             [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
           v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]

    """
    mask = ops.ones((qlen, qlen + mlen))
    if self.same_length:
        mask_lo = mask[:, :qlen].tril(-1)
        mask.triu_(mlen + 1)
        mask[:, :qlen] += mask_lo
    else:
        mask.triu_(mlen + 1)

    return mask
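
A standalone sketch of the `same_length=False` branch, written with NumPy purely for illustration (1.0 marks a masked position):

```python
import numpy as np

qlen, mlen = 4, 2
# Equivalent of `ops.ones((qlen, qlen + mlen)).triu_(mlen + 1)` above.
mask = np.triu(np.ones((qlen, qlen + mlen)), k=mlen + 1)
print(mask)
# [[0. 0. 0. 1. 1. 1.]
#  [0. 0. 0. 0. 1. 1.]
#  [0. 0. 0. 0. 0. 1.]
#  [0. 0. 0. 0. 0. 0.]]
```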

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetModelOutput dataclass

Bases: ModelOutput

Output type of [XLNetModel].

PARAMETER DESCRIPTION
last_hidden_state

Sequence of hidden-states at the last layer of the model.

num_predict corresponds to target_mapping.shape[1]. If target_mapping is None, then num_predict corresponds to sequence_length.

TYPE: `mindspore.Tensor` of shape `(batch_size, num_predict, hidden_size)`

mems

Contains pre-computed hidden-states. Can be used (see mems input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input_ids as they have already been computed.

TYPE: `List[mindspore.Tensor]` of length `config.n_layers` DEFAULT: None

hidden_states

Tuple of mindspore.Tensor (one for the output of the embeddings + one for the output of each layer) of shape (batch_size, sequence_length, hidden_size).

Hidden-states of the model at the output of each layer plus the initial embedding outputs.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True` DEFAULT: None

attentions

Tuple of mindspore.Tensor (one for each layer) of shape (batch_size, num_heads, sequence_length, sequence_length).

Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.

TYPE: `tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True` DEFAULT: None

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
@dataclass
class XLNetModelOutput(ModelOutput):
    """
    Output type of [`XLNetModel`].

    Args:
        last_hidden_state (`mindspore.Tensor` of shape `(batch_size, num_predict, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.

            `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
            corresponds to `sequence_length`.
        mems (`List[mindspore.Tensor]` of length `config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
            token ids which have their past given to this model should not be passed as `input_ids` as they have
            already been computed.
        hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `mindspore.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `mindspore.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    last_hidden_state: mindspore.Tensor
    mems: Optional[List[mindspore.Tensor]] = None
    hidden_states: Optional[Tuple[mindspore.Tensor, ...]] = None
    attentions: Optional[Tuple[mindspore.Tensor, ...]] = None
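
A minimal sketch of producing and reading this output with randomly initialized weights; the tiny hyper-parameters are illustrative, not a released configuration, and it assumes `XLNetConfig`/`XLNetModel` are exported from `mindnlp.transformers`:

```python
import numpy as np
import mindspore
from mindnlp.transformers import XLNetConfig, XLNetModel

config = XLNetConfig(vocab_size=100, d_model=64, n_layer=2, n_head=4, d_inner=128)
model = XLNetModel(config)

input_ids = mindspore.Tensor(np.random.randint(0, 100, (1, 8)), mindspore.int64)
outputs = model(input_ids, output_hidden_states=True, return_dict=True)

print(outputs.last_hidden_state.shape)  # (1, 8, 64)
print(len(outputs.hidden_states))       # n_layer + 1 == 3
```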

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetPreTrainedModel

Bases: PreTrainedModel

An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
class XLNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = XLNetConfig
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight[module.padding_idx] = 0
        elif isinstance(module, nn.LayerNorm):
            nn.init.zeros_(module.bias)
            nn.init.ones_(module.weight)
        elif isinstance(module, XLNetRelativeAttention):
            for param in [
                module.q,
                module.k,
                module.v,
                module.o,
                module.r,
                module.r_r_bias,
                module.r_s_bias,
                module.r_w_bias,
                module.seg_embed,
            ]:
                nn.init.normal_(param, mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, XLNetModel):
            nn.init.normal_(module.mask_emb, mean=0.0, std=self.config.initializer_range)

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetRelativeAttention

Bases: Module

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
class XLNetRelativeAttention(nn.Module):
    def __init__(self, config):
        super().__init__()

        if config.d_model % config.n_head != 0:
            raise ValueError(
                f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
                f"heads ({config.n_head}"
            )

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head**0.5)

        self.q = nn.Parameter(ops.randn(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(ops.randn(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(ops.randn(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(ops.randn(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(ops.randn(config.d_model, self.n_head, self.d_head))

        self.r_r_bias = nn.Parameter(ops.randn(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(ops.randn(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(ops.randn(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(ops.randn(2, self.n_head, self.d_head))

        self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape

        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = ops.index_select(x, 1, ops.arange(klen, dtype=mindspore.int64))

        return x

    @staticmethod
    def rel_shift_bnij(x, klen=-1):
        x_size = x.shape

        x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
        x = x[:, :, 1:, :]
        x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
        # Note: the tensor-slice form was faster in my testing than ops.index_select
        #       However, tracing doesn't like the nature of the slice, and if klen changes
        #       during the run then it'll fail, whereas index_select will be fine.
        x = ops.index_select(x, 3, ops.arange(klen, dtype=mindspore.int64))
        # x = x[:, :, :, :klen]

        return x

    def rel_attn_core(
        self,
        q_head,
        k_head_h,
        v_head_h,
        k_head_r,
        seg_mat=None,
        attn_mask=None,
        head_mask=None,
        output_attentions=False,
    ):
        """Core relative positional attention operations."""

        # content based attention score
        ac = ops.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)

        # position based attention score
        bd = ops.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift_bnij(bd, klen=ac.shape[3])

        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = ops.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
            ef = ops.einsum("ijbs,ibns->bnij", seg_mat, ef)

        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            if attn_mask.dtype == mindspore.float16:
                attn_score = attn_score - 65500 * ops.einsum("ijbn->bnij", attn_mask)
            else:
                attn_score = attn_score - 1e30 * ops.einsum("ijbn->bnij", attn_mask)

        # attention probability
        attn_prob = nn.functional.softmax(attn_score, dim=3)
        attn_prob = self.dropout(attn_prob)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * ops.einsum("ijbn->bnij", head_mask)

        # attention output
        attn_vec = ops.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)

        if output_attentions:
            return attn_vec, ops.einsum("bnij->ijbn", attn_prob)

        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = ops.einsum("ibnd,hnd->ibh", attn_vec, self.o)

        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)

        return output

    def forward(
        self,
        h,
        g,
        attn_mask_h,
        attn_mask_g,
        r,
        seg_mat,
        mems=None,
        target_mapping=None,
        head_mask=None,
        output_attentions=False,
    ):
        if g is not None:
            # Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = ops.cat([mems, h], dim=0)
            else:
                cat = h

            # content-based key head
            k_head_h = ops.einsum("ibh,hnd->ibnd", cat, self.k)

            # content-based value head
            v_head_h = ops.einsum("ibh,hnd->ibnd", cat, self.v)

            # position-based key head
            k_head_r = ops.einsum("ibh,hnd->ibnd", r, self.r)

            # h-stream
            # content-stream query head
            q_head_h = ops.einsum("ibh,hnd->ibnd", h, self.q)

            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h,
                k_head_h,
                v_head_h,
                k_head_r,
                seg_mat=seg_mat,
                attn_mask=attn_mask_h,
                head_mask=head_mask,
                output_attentions=output_attentions,
            )

            if output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h

            # post processing
            output_h = self.post_attention(h, attn_vec_h)

            # g-stream
            # query-stream query head
            q_head_g = ops.einsum("ibh,hnd->ibnd", g, self.q)

            # core attention ops
            if target_mapping is not None:
                q_head_g = ops.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g,
                    k_head_h,
                    v_head_h,
                    k_head_r,
                    seg_mat=seg_mat,
                    attn_mask=attn_mask_g,
                    head_mask=head_mask,
                    output_attentions=output_attentions,
                )

                if output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

                attn_vec_g = ops.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g,
                    k_head_h,
                    v_head_h,
                    k_head_r,
                    seg_mat=seg_mat,
                    attn_mask=attn_mask_g,
                    head_mask=head_mask,
                    output_attentions=output_attentions,
                )

                if output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

            # post processing
            output_g = self.post_attention(g, attn_vec_g)

            if output_attentions:
                attn_prob = attn_prob_h, attn_prob_g

        else:
            # Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = ops.cat([mems, h], dim=0)
            else:
                cat = h

            # content heads
            q_head_h = ops.einsum("ibh,hnd->ibnd", h, self.q)
            k_head_h = ops.einsum("ibh,hnd->ibnd", cat, self.k)
            v_head_h = ops.einsum("ibh,hnd->ibnd", cat, self.v)

            # positional heads
            # type casting for fp16 support
            k_head_r = ops.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r)

            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h,
                k_head_h,
                v_head_h,
                k_head_r,
                seg_mat=seg_mat,
                attn_mask=attn_mask_h,
                head_mask=head_mask,
                output_attentions=output_attentions,
            )

            if output_attentions:
                attn_vec, attn_prob = attn_vec

            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None

        outputs = (output_h, output_g)
        if output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs
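
The `rel_shift_bnij` reshape trick above can be hard to visualize. The NumPy sketch below mirrors its reshape/slice steps on a tiny dummy score tensor (plain slicing stands in for `ops.index_select`):

```python
import numpy as np

x = np.arange(8).reshape(1, 1, 2, 4)          # [bsz, n_head, qlen, pos]
b, n, i, j = x.shape
klen = 3

shifted = x.reshape(b, n, j, i)[:, :, 1:, :]  # swap the last two dims, drop one row
shifted = shifted.reshape(b, n, i, j - 1)[:, :, :, :klen]
print(shifted[0, 0])
# [[2 3 4]
#  [5 6 7]]
```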

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetRelativeAttention.post_attention(h, attn_vec, residual=True)

Post-attention processing.

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
def post_attention(self, h, attn_vec, residual=True):
    """Post-attention processing."""
    # post-attention projection (back to `d_model`)
    attn_out = ops.einsum("ibnd,hnd->ibh", attn_vec, self.o)

    attn_out = self.dropout(attn_out)
    if residual:
        attn_out = attn_out + h
    output = self.layer_norm(attn_out)

    return output

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetRelativeAttention.rel_attn_core(q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None, output_attentions=False)

Core relative positional attention operations.

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
def rel_attn_core(
    self,
    q_head,
    k_head_h,
    v_head_h,
    k_head_r,
    seg_mat=None,
    attn_mask=None,
    head_mask=None,
    output_attentions=False,
):
    """Core relative positional attention operations."""

    # content based attention score
    ac = ops.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)

    # position based attention score
    bd = ops.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
    bd = self.rel_shift_bnij(bd, klen=ac.shape[3])

    # segment based attention score
    if seg_mat is None:
        ef = 0
    else:
        ef = ops.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
        ef = ops.einsum("ijbs,ibns->bnij", seg_mat, ef)

    # merge attention scores and perform masking
    attn_score = (ac + bd + ef) * self.scale
    if attn_mask is not None:
        # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
        if attn_mask.dtype == mindspore.float16:
            attn_score = attn_score - 65500 * ops.einsum("ijbn->bnij", attn_mask)
        else:
            attn_score = attn_score - 1e30 * ops.einsum("ijbn->bnij", attn_mask)

    # attention probability
    attn_prob = nn.functional.softmax(attn_score, dim=3)
    attn_prob = self.dropout(attn_prob)

    # Mask heads if we want to
    if head_mask is not None:
        attn_prob = attn_prob * ops.einsum("ijbn->bnij", head_mask)

    # attention output
    attn_vec = ops.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)

    if output_attentions:
        return attn_vec, ops.einsum("bnij->ijbn", attn_prob)

    return attn_vec

mindnlp.transformers.models.xlnet.modeling_xlnet.XLNetRelativeAttention.rel_shift(x, klen=-1) staticmethod

perform relative shift to form the relative attention score.

Source code in mindnlp\transformers\models\xlnet\modeling_xlnet.py
@staticmethod
def rel_shift(x, klen=-1):
    """perform relative shift to form the relative attention score."""
    x_size = x.shape

    x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
    x = x[1:, ...]
    x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
    # x = x[:, 0:klen, :, :]
    x = ops.index_select(x, 1, ops.arange(klen, dtype=mindspore.int64))

    return x

mindnlp.transformers.models.xlnet.configuration_xlnet

XLNet configuration

mindnlp.transformers.models.xlnet.configuration_xlnet.XLNetConfig

Bases: PretrainedConfig

Configuration for XLNet

Source code in mindnlp\transformers\models\xlnet\configuration_xlnet.py
class XLNetConfig(PretrainedConfig):
    """
    Configuration for XLNet
    """
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
            self,
            vocab_size=32000,
            d_model=1024,
            n_layer=24,
            n_head=16,
            d_inner=4096,
            ff_activation="gelu",
            untie_r=True,
            attn_type="bi",
            initializer_range=0.02,
            layer_norm_eps=1e-12,
            dropout=0.1,
            mem_len=512,
            reuse_len=None,
            use_mems_eval=True,
            use_mems_train=False,
            bi_data=False,
            clamp_len=-1,
            same_length=False,
            summary_type="last",
            summary_use_proj=True,
            summary_activation="tanh",
            summary_last_dropout=0.1,
            start_n_top=5,
            end_n_top=5,
            pad_token_id=5,
            bos_token_id=1,
            eos_token_id=2,
            **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """
        This method returns the maximum position embeddings for the XLNet model.

        Args:
            self (XLNetConfig): The instance of the XLNetConfig class.

        Returns:
            int: Always -1; XLNet has no fixed maximum sequence length (a message is logged on access).

        Raises:
            None
        """
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """
        Sets the maximum position embeddings for the XLNetConfig class.

        Args:
            self (XLNetConfig): An instance of the XLNetConfig class.
            value: The desired value for the maximum position embeddings. It should be an integer.

        Returns:
            None.

        Raises:
            NotImplementedError: This exception is raised when trying to set the maximum position embeddings for
                the XLNetConfig class. Since the model type is one of the few models that has no sequence length
                limit, setting the maximum position embeddings is not allowed.

        Note:
            The model type should be specified before using this method.
        """
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )

mindnlp.transformers.models.xlnet.configuration_xlnet.XLNetConfig.max_position_embeddings property writable

This method returns the maximum position embeddings for the XLNet model.

PARAMETER DESCRIPTION
self

The instance of the XLNetConfig class.

TYPE: XLNetConfig

RETURNS DESCRIPTION
int

Always -1; XLNet has no fixed maximum sequence length (a message is logged when the property is accessed).

mindnlp.transformers.models.xlnet.configuration_xlnet.XLNetConfig.__init__(vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation='gelu', untie_r=True, attn_type='bi', initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type='last', summary_use_proj=True, summary_activation='tanh', summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs)

Constructs XLNetConfig.

Source code in mindnlp\transformers\models\xlnet\configuration_xlnet.py
def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
):
    """Constructs XLNetConfig."""
    self.vocab_size = vocab_size
    self.d_model = d_model
    self.n_layer = n_layer
    self.n_head = n_head
    if d_model % n_head != 0:
        raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
    if "d_head" in kwargs:
        if kwargs["d_head"] != d_model // n_head:
            raise ValueError(
                f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
            )
    self.d_head = d_model // n_head
    self.ff_activation = ff_activation
    self.d_inner = d_inner
    self.untie_r = untie_r
    self.attn_type = attn_type

    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps

    self.dropout = dropout
    self.mem_len = mem_len
    self.reuse_len = reuse_len
    self.bi_data = bi_data
    self.clamp_len = clamp_len
    self.same_length = same_length

    self.summary_type = summary_type
    self.summary_use_proj = summary_use_proj
    self.summary_activation = summary_activation
    self.summary_last_dropout = summary_last_dropout
    self.start_n_top = start_n_top
    self.end_n_top = end_n_top

    self.bos_token_id = bos_token_id
    self.pad_token_id = pad_token_id
    self.eos_token_id = eos_token_id

    if "use_cache" in kwargs:
        warnings.warn(
            "The `use_cache` argument is deprecated, use `use_mems_eval`"
            " instead.",
            FutureWarning,
        )
        use_mems_eval = kwargs["use_cache"]

    self.use_mems_eval = use_mems_eval
    self.use_mems_train = use_mems_train
    super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
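
A hedged instantiation sketch (assuming `XLNetConfig` is exported from `mindnlp.transformers`): `d_head` is derived as `d_model // n_head`, and aliases such as `hidden_size` resolve through `attribute_map`.

```python
from mindnlp.transformers import XLNetConfig

config = XLNetConfig(
    vocab_size=32000,
    d_model=768,
    n_layer=12,
    n_head=12,
    d_inner=3072,
    attn_type="bi",
    mem_len=512,
)
print(config.d_head)       # 64
print(config.hidden_size)  # 768, aliased to d_model via attribute_map
```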

mindnlp.transformers.models.xlnet.tokenization_xlnet

Tokenization classes for XLNet model.

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer

Bases: PreTrainedTokenizer

Construct an XLNet tokenizer. Based on SentencePiece.

This tokenizer inherits from [PreTrainedTokenizer] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.

PARAMETER DESCRIPTION
vocab_file

SentencePiece file (generally has a .spm extension) that contains the vocabulary necessary to instantiate a tokenizer.

TYPE: `str`

do_lower_case

Whether to lowercase the input when tokenizing.

TYPE: `bool`, *optional*, defaults to `False` DEFAULT: False

remove_space

Whether to strip the text when tokenizing (removing excess spaces before and after the string).

TYPE: `bool`, *optional*, defaults to `True` DEFAULT: True

keep_accents

Whether to keep accents when tokenizing.

TYPE: `bool`, *optional*, defaults to `False` DEFAULT: False

bos_token

The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.

When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the cls_token.

TYPE: `str`, *optional*, defaults to `"<s>"` DEFAULT: '<s>'

eos_token

The end of sequence token.

When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the sep_token.

TYPE: `str`, *optional*, defaults to `"</s>"` DEFAULT: '</s>'

unk_token

The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.

TYPE: `str`, *optional*, defaults to `"<unk>"` DEFAULT: '<unk>'

sep_token

The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.

TYPE: `str`, *optional*, defaults to `"<sep>"` DEFAULT: '<sep>'

pad_token

The token used for padding, for example when batching sequences of different lengths.

TYPE: `str`, *optional*, defaults to `"<pad>"` DEFAULT: '<pad>'

cls_token

The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.

TYPE: `str`, *optional*, defaults to `"<cls>"` DEFAULT: '<cls>'

mask_token

The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.

TYPE: `str`, *optional*, defaults to `"<mask>"` DEFAULT: '<mask>'

additional_special_tokens

Additional special tokens used by the tokenizer.

TYPE: `List[str]`, *optional*, defaults to `['<eop>', '<eod>']` DEFAULT: ['<eop>', '<eod>']

sp_model_kwargs

Will be passed to the SentencePieceProcessor.__init__() method. The Python wrapper for SentencePiece can be used, among other things, to set:

  • enable_sampling: Enable subword regularization.
  • nbest_size: Sampling parameters for unigram. Invalid for BPE-Dropout.

    • nbest_size = {0,1}: No sampling is performed.
    • nbest_size > 1: samples from the nbest_size results.
    • nbest_size < 0: assuming that nbest_size is infinite and samples from all hypotheses (lattice) using the forward-filtering-and-backward-sampling algorithm.
  • alpha: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout.

TYPE: `dict`, *optional* DEFAULT: None

ATTRIBUTE DESCRIPTION
sp_model

The SentencePiece processor that is used for every conversion (string, tokens and IDs).

TYPE: `SentencePieceProcessor`
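
A short usage sketch (assuming the public `xlnet-base-cased` SentencePiece checkpoint is reachable through `from_pretrained`):

```python
from mindnlp.transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

tokens = tokenizer.tokenize("Hello, world!")
ids = tokenizer.encode("Hello, world!")  # appends <sep> and <cls>, see build_inputs_with_special_tokens
text = tokenizer.decode(ids, skip_special_tokens=True)
print(tokens)
print(ids)
print(text)
```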

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
class XLNetTokenizer(PreTrainedTokenizer):
    """
    Construct an XLNet tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether to lowercase the input when tokenizing.
        remove_space (`bool`, *optional*, defaults to `True`):
            Whether to strip the text when tokenizing (removing excess spaces before and after the string).
        keep_accents (`bool`, *optional*, defaults to `False`):
            Whether to keep accents when tokenizing.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"<sep>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"<cls>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        additional_special_tokens (`List[str]`, *optional*, defaults to `['<eop>', '<eod>']`):
            Additional special tokens used by the tokenizer.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

                - `nbest_size = {0,1}`: No sampling is performed.
                - `nbest_size > 1`: samples from the nbest_size results.
                - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
            BPE-dropout.

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
            self,
            vocab_file,
            do_lower_case=False,
            remove_space=True,
            keep_accents=False,
            bos_token="<s>",
            eos_token="</s>",
            unk_token="<unk>",
            sep_token="<sep>",
            pad_token="<pad>",
            cls_token="<cls>",
            mask_token="<mask>",
            additional_special_tokens=["<eop>", "<eod>"],
            sp_model_kwargs: Optional[Dict[str, Any]] = None,
            **kwargs,
    ) -> None:
        """
        Initialize an XLNetTokenizer object.

        Args:
            vocab_file (str): Path to the vocabulary file.
            do_lower_case (bool, optional): Whether to lowercase the input tokens. Defaults to False.
            remove_space (bool, optional): Whether to remove spaces in the input tokens. Defaults to True.
            keep_accents (bool, optional): Whether to keep accents in the input tokens. Defaults to False.
            bos_token (str, optional): Beginning of sentence token. Defaults to '<s>'.
            eos_token (str, optional): End of sentence token. Defaults to '</s>'.
            unk_token (str, optional): Unknown token. Defaults to '<unk>'.
            sep_token (str, optional): Separator token. Defaults to '<sep>'.
            pad_token (str, optional): Padding token. Defaults to '<pad>'.
            cls_token (str, optional): Classification token. Defaults to '<cls>'.
            mask_token (str, optional): Mask token. Defaults to '<mask>'.
            additional_special_tokens (list, optional): Additional special tokens to include.
                Defaults to ['<eop>', '<eod>'].
            sp_model_kwargs (Dict[str, Any], optional): SentencePiece model keyword arguments. Defaults to None.
            **kwargs: Additional keyword arguments.

        Returns:
            None

        Raises:
            TypeError: If the mask_token is not a string.
        """
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3

    @property
    def vocab_size(self):
        """
        Returns the vocabulary size of the XLNetTokenizer.

        Args:
            self (XLNetTokenizer): An instance of the XLNetTokenizer class.

        Returns:
            int: The vocabulary size of the tokenizer.

        Raises:
            None

        This method calculates and returns the size of the vocabulary used by the XLNetTokenizer.
        The vocabulary size is determined by the number of unique tokens present in the tokenizer's sp_model.

        Note:
            The vocabulary size represents the number of distinct tokens that the tokenizer can recognize and encode.

        Example:
            ```python
            >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
            >>> size = tokenizer.vocab_size
            >>> print(size)
            32000
            ```
        """
        return len(self.sp_model)

    def get_vocab(self):
        """
        Returns the vocabulary of the XLNetTokenizer.

        Args:
            self: The instance of the XLNetTokenizer class.

        Returns:
            dict:
                A dictionary containing the vocabulary of the XLNetTokenizer.
                The keys of the dictionary are the tokens, and the values are their corresponding indices.

        Raises:
            None.

        Example:
            ```python
            >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
            >>> tokenizer.get_vocab()
            {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, '<mask>': 4, 'hello': 5, 'world': 6}
            ```
        """
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        """
        Method '__getstate__' in the class 'XLNetTokenizer'.

        Args:
            self (XLNetTokenizer): The instance of the XLNetTokenizer class.
                Represents the current XLNetTokenizer object.

        Returns:
            dict: This method returns a dictionary representing the state of the XLNetTokenizer object.
                The 'sp_model' key in the dictionary is set to None before returning.

        Raises:
            None.
        """
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """
        This method __setstate__ is defined in the class XLNetTokenizer and is used to set the internal state of the
        object based on the provided dictionary 'd'.

        Args:
            self (XLNetTokenizer): The instance of the XLNetTokenizer class.
            d (dict): A dictionary containing the state information to be set. The keys and values in the dictionary
                are used to update the internal state of the XLNetTokenizer object.

        Returns:
            None. This method does not return any value.

        Raises:
            None:
                However, potential exceptions that could be raised include:

                - AttributeError: If the 'sp_model_kwargs' attribute is not found within the XLNetTokenizer object.
                - TypeError: If the provided 'd' parameter is not a dictionary.
                - Other exceptions related to the SentencePieceProcessor object creation or loading process may be
                raised from the spm.SentencePieceProcessor constructor or Load method.
        """
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """
        This method preprocesses the input text according to the specified configuration settings.

        Args:
            self (XLNetTokenizer): The instance of the XLNetTokenizer class.
            inputs (str): The input text to be preprocessed. It should be a string representation.

        Returns:
            str: The preprocessed text based on the applied configuration settings.

        Raises:
            None
        """
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
            self,
            token_ids: List[int],
            skip_special_tokens: bool = False,
            clean_up_tokenization_spaces: bool = None,
            spaces_between_special_tokens: bool = True,
            **kwargs,
    ) -> str:
        """
        This method decodes a list of token IDs into a string representation.

        Args:
            self: The instance of the XLNetTokenizer class.
            token_ids (List[int]): A list of token IDs to be decoded into a string.
            skip_special_tokens (bool): A flag indicating whether to skip special tokens during decoding.
                Defaults to False.
            clean_up_tokenization_spaces (bool): A flag indicating whether to clean up tokenization spaces in the
                decoded text. If None, the value is determined by the clean_up_tokenization_spaces attribute of
                the XLNetTokenizer instance.
            spaces_between_special_tokens (bool): A flag indicating whether to include spaces between special tokens
                in the decoded text. Defaults to True.
            **kwargs: Additional keyword arguments. 'use_source_tokenizer' is a supported argument to control the
                use of the source tokenizer during decoding.

        Returns:
            str: The decoded string representation of the token IDs.

        Raises:
            None
        """
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
            already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:
        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary of the XLNetTokenizer.

        Args:
            self (XLNetTokenizer): An instance of the XLNetTokenizer class.
            save_directory (str): The directory path where the vocabulary will be saved.
            filename_prefix (Optional[str]): An optional prefix for the filename of the saved vocabulary.
                Defaults to None.

        Returns:
            Tuple[str]: A tuple containing the path to the saved vocabulary file.

        Raises:
            FileNotFoundError: If the specified save_directory does not exist.
            PermissionError: If the specified save_directory is not accessible for writing.

        Note:
            - The saved vocabulary file will be named as per the following format:
            '<filename_prefix>-vocab.txt' if filename_prefix is provided, otherwise 'vocab.txt'.
            - If the provided save_directory is the same as the current vocabulary file's directory and
            the vocabulary file already exists, it will be copied to the save_directory.
            - If the current vocabulary file does not exist, a new vocabulary file will be created in the
            save_directory using the serialized model from the sp_model attribute of the tokenizer.

        Example:
            ```python
            >>> tokenizer = XLNetTokenizer()
            >>> save_dir = '/path/to/save'
            >>> prefix = 'english'
            >>> vocab_file = tokenizer.save_vocabulary(save_dir, prefix)
            >>> print(f"Vocabulary saved at: {vocab_file}")
            ```
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
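
Taken together, the methods above cover the usual round trip for the slow tokenizer: raw text is split into SentencePiece pieces, mapped to IDs, wrapped with `<sep>`/`<cls>`, and decoded back to a string. A minimal sketch of that flow; the vocabulary path below is hypothetical, and any trained XLNet SentencePiece model can be substituted:

```python
from mindnlp.transformers.models.xlnet.tokenization_xlnet import XLNetTokenizer

# Hypothetical path to a SentencePiece model file for XLNet.
tokenizer = XLNetTokenizer(vocab_file="/path/to/spiece.model")

pieces = tokenizer.tokenize("Hello world")                           # SentencePiece pieces, e.g. ['▁Hello', '▁world']
ids = tokenizer.convert_tokens_to_ids(pieces)                        # piece -> id via sp_model.PieceToId
ids_with_special = tokenizer.build_inputs_with_special_tokens(ids)   # X <sep> <cls>
text = tokenizer.decode(ids_with_special, skip_special_tokens=True)  # back to plain text via _decode above
print(pieces, ids_with_special, text)
```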

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size property

Returns the vocabulary size of the XLNetTokenizer.

PARAMETER DESCRIPTION
self

An instance of the XLNetTokenizer class.

TYPE: XLNetTokenizer

RETURNS DESCRIPTION
int

The vocabulary size of the tokenizer.

This method calculates and returns the size of the vocabulary used by the XLNetTokenizer. The vocabulary size is determined by the number of unique tokens present in the tokenizer's sp_model.

Note

The vocabulary size represents the number of distinct tokens that the tokenizer can recognize and encode.

Example
>>> tokenizer = XLNetTokenizer()
>>> size = tokenizer.vocab_size
>>> print(size)
32000

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__getstate__()

Method '__getstate__' in the class 'XLNetTokenizer'.

PARAMETER DESCRIPTION
self

The instance of the XLNetTokenizer class. Represents the current XLNetTokenizer object.

TYPE: XLNetTokenizer

RETURNS DESCRIPTION
dict

This method returns a dictionary representing the state of the XLNetTokenizer object. The 'sp_model' key in the dictionary is set to None before returning.

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def __getstate__(self):
    """
    Method '__getstate__' in the class 'XLNetTokenizer'.

    Args:
        self (XLNetTokenizer): The instance of the XLNetTokenizer class.
            Represents the current XLNetTokenizer object.

    Returns:
        dict: This method returns a dictionary representing the state of the XLNetTokenizer object.
            The 'sp_model' key in the dictionary is set to None before returning.

    Raises:
        None.
    """
    state = self.__dict__.copy()
    state["sp_model"] = None
    return state

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__init__(vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], sp_model_kwargs=None, **kwargs)

Initialize an XLNetTokenizer object.

PARAMETER DESCRIPTION
vocab_file

Path to the vocabulary file.

TYPE: str

do_lower_case

Whether to lowercase the input tokens. Defaults to False.

TYPE: bool DEFAULT: False

remove_space

Whether to remove spaces in the input tokens. Defaults to True.

TYPE: bool DEFAULT: True

keep_accents

Whether to keep accents in the input tokens. Defaults to False.

TYPE: bool DEFAULT: False

bos_token

Beginning of sentence token. Defaults to '<s>'.

TYPE: str DEFAULT: '<s>'

eos_token

End of sentence token. Defaults to '</s>'.

TYPE: str DEFAULT: '</s>'

unk_token

Unknown token. Defaults to '<unk>'.

TYPE: str DEFAULT: '<unk>'

sep_token

Separator token. Defaults to '<sep>'.

TYPE: str DEFAULT: '<sep>'

pad_token

Padding token. Defaults to '<pad>'.

TYPE: str DEFAULT: '<pad>'

cls_token

Classification token. Defaults to '<cls>'.

TYPE: str DEFAULT: '<cls>'

mask_token

Mask token. Defaults to '<mask>'.

TYPE: str DEFAULT: '<mask>'

additional_special_tokens

Additional special tokens to include. Defaults to ['<eop>', '<eod>'].

TYPE: list DEFAULT: ['<eop>', '<eod>']

sp_model_kwargs

SentencePiece model keyword arguments. Defaults to None.

TYPE: Dict[str, Any] DEFAULT: None

**kwargs

Additional keyword arguments.

DEFAULT: {}

RETURNS DESCRIPTION
None

None

RAISES DESCRIPTION
TypeError

If the mask_token is not a string.

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
) -> None:
    """
    Initialize an XLNetTokenizer object.

    Args:
        vocab_file (str): Path to the vocabulary file.
        do_lower_case (bool, optional): Whether to lowercase the input tokens. Defaults to False.
        remove_space (bool, optional): Whether to remove spaces in the input tokens. Defaults to True.
        keep_accents (bool, optional): Whether to keep accents in the input tokens. Defaults to False.
        bos_token (str, optional): Beginning of sentence token. Defaults to '<s>'.
        eos_token (str, optional): End of sentence token. Defaults to '</s>'.
        unk_token (str, optional): Unknown token. Defaults to '<unk>'.
        sep_token (str, optional): Separator token. Defaults to '<sep>'.
        pad_token (str, optional): Padding token. Defaults to '<pad>'.
        cls_token (str, optional): Classification token. Defaults to '<cls>'.
        mask_token (str, optional): Mask token. Defaults to '<mask>'.
        additional_special_tokens (list, optional): Additional special tokens to include.
            Defaults to ['<eop>', '<eod>'].
        sp_model_kwargs (Dict[str, Any], optional): SentencePiece model keyword arguments. Defaults to None.
        **kwargs: Additional keyword arguments.

    Returns:
        None

    Raises:
        TypeError: If the mask_token is not a string.
    """
    # Mask token behave like a normal word, i.e. include the space before it
    mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token

    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

    self.do_lower_case = do_lower_case
    self.remove_space = remove_space
    self.keep_accents = keep_accents
    self.vocab_file = vocab_file

    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
    self.sp_model.Load(vocab_file)

    super().__init__(
        do_lower_case=do_lower_case,
        remove_space=remove_space,
        keep_accents=keep_accents,
        bos_token=bos_token,
        eos_token=eos_token,
        unk_token=unk_token,
        sep_token=sep_token,
        pad_token=pad_token,
        cls_token=cls_token,
        mask_token=mask_token,
        additional_special_tokens=additional_special_tokens,
        sp_model_kwargs=self.sp_model_kwargs,
        **kwargs,
    )

    self._pad_token_type_id = 3
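
The constructor loads the SentencePiece model, registers the XLNet special tokens, and sets `_pad_token_type_id = 3` (the segment ID used for padding). A brief instantiation sketch; the vocabulary path is hypothetical, and `pad_token_type_id` is assumed to be exposed by the tokenizer base class as in the Transformers-style API:

```python
from mindnlp.transformers.models.xlnet.tokenization_xlnet import XLNetTokenizer

tokenizer = XLNetTokenizer(
    vocab_file="/path/to/spiece.model",  # hypothetical SentencePiece model file
    do_lower_case=True,                  # lowercase inside preprocess_text
    keep_accents=False,                  # strip combining accents via NFKD normalization
    sp_model_kwargs={},                  # forwarded to sentencepiece.SentencePieceProcessor
)
print(tokenizer.vocab_size)          # size of the loaded SentencePiece vocabulary
print(tokenizer.pad_token_type_id)   # 3 -- XLNet's padding segment id, set in __init__
```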

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__setstate__(d)

This method __setstate__ is defined in the class XLNetTokenizer and is used to set the internal state of the object based on the provided dictionary 'd'.

PARAMETER DESCRIPTION
self

The instance of the XLNetTokenizer class.

TYPE: XLNetTokenizer

d

A dictionary containing the state information to be set. The keys and values in the dictionary are used to update the internal state of the XLNetTokenizer object.

TYPE: dict

RETURNS DESCRIPTION

None. This method does not return any value.

RAISES DESCRIPTION
None

However, potential exceptions that could be raised include:

  • AttributeError: If the 'sp_model_kwargs' attribute is not found within the XLNetTokenizer object.
  • TypeError: If the provided 'd' parameter is not a dictionary.
  • Other exceptions related to the SentencePieceProcessor object creation or loading process may be raised from the spm.SentencePieceProcessor constructor or Load method.
Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def __setstate__(self, d):
    """
    This method __setstate__ is defined in the class XLNetTokenizer and is used to set the internal state of the
    object based on the provided dictionary 'd'.

    Args:
        self (XLNetTokenizer): The instance of the XLNetTokenizer class.
        d (dict): A dictionary containing the state information to be set. The keys and values in the dictionary
            are used to update the internal state of the XLNetTokenizer object.

    Returns:
        None. This method does not return any value.

    Raises:
        None:
            However, potential exceptions that could be raised include:

            - AttributeError: If the 'sp_model_kwargs' attribute is not found within the XLNetTokenizer object.
            - TypeError: If the provided 'd' parameter is not a dictionary.
            - Other exceptions related to the SentencePieceProcessor object creation or loading process may be
            raised from the spm.SentencePieceProcessor constructor or Load method.
    """
    self.__dict__ = d

    # for backward compatibility
    if not hasattr(self, "sp_model_kwargs"):
        self.sp_model_kwargs = {}

    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
    self.sp_model.Load(self.vocab_file)
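
Because `__getstate__` drops the non-picklable SentencePiece processor and `__setstate__` rebuilds it from `vocab_file`, tokenizer instances survive a `pickle` round trip. A small sketch of that behaviour (the vocabulary path is again hypothetical):

```python
import pickle

from mindnlp.transformers.models.xlnet.tokenization_xlnet import XLNetTokenizer

tokenizer = XLNetTokenizer(vocab_file="/path/to/spiece.model")  # hypothetical path

payload = pickle.dumps(tokenizer)   # __getstate__ sets state["sp_model"] = None
restored = pickle.loads(payload)    # __setstate__ reloads sp_model from vocab_file
assert restored.tokenize("Hello world") == tokenizer.tokenize("Hello world")
```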

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.build_inputs_with_special_tokens(token_ids_0, token_ids_1=None)

Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. An XLNet sequence has the following format:

  • single sequence: X <sep> <cls>
  • pair of sequences: A <sep> B <sep> <cls>
PARAMETER DESCRIPTION
token_ids_0

List of IDs to which the special tokens will be added.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

RETURNS DESCRIPTION
List[int]

List[int]: List of input IDs with the appropriate special tokens.

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
    adding special tokens. An XLNet sequence has the following format:

    - single sequence: `X <sep> <cls>`
    - pair of sequences: `A <sep> B <sep> <cls>`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return token_ids_0 + sep + cls
    return token_ids_0 + sep + token_ids_1 + sep + cls
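
The concatenation is plain list arithmetic, so the expected layout can be checked without a vocabulary file. In this sketch `sep_token_id` and `cls_token_id` are made-up values; the real IDs come from the SentencePiece vocabulary:

```python
sep_token_id, cls_token_id = 4, 3   # hypothetical special-token ids
token_ids_0 = [10, 11, 12]          # sequence A
token_ids_1 = [20, 21]              # sequence B

single = token_ids_0 + [sep_token_id] + [cls_token_id]
pair = token_ids_0 + [sep_token_id] + token_ids_1 + [sep_token_id] + [cls_token_id]
print(single)  # [10, 11, 12, 4, 3]             -> X <sep> <cls>
print(pair)    # [10, 11, 12, 4, 20, 21, 4, 3]  -> A <sep> B <sep> <cls>
```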

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.convert_tokens_to_string(tokens)

Converts a sequence of tokens (strings for sub-words) into a single string.

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def convert_tokens_to_string(self, tokens):
    """Converts a sequence of tokens (strings for sub-words) in a single string."""
    out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
    return out_string
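
`SPIECE_UNDERLINE` is the '▁' (U+2581) marker that SentencePiece places at word boundaries; joining the pieces and turning that marker back into spaces recovers the surface text. A standalone sketch of the same operation:

```python
SPIECE_UNDERLINE = "▁"  # U+2581, SentencePiece's word-boundary marker

pieces = ["▁Hello", ",", "▁how", "▁are", "▁you", "?"]
out_string = "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip()
print(out_string)  # Hello, how are you?
```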

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.create_token_type_ids_from_sequences(token_ids_0, token_ids_1=None)

Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet sequence pair mask has the following format:

0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence    | second sequence |

If token_ids_1 is None, this method only returns the first portion of the mask (0s).

PARAMETER DESCRIPTION
token_ids_0

List of IDs.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

RETURNS DESCRIPTION
List[int]

List[int]: List of token type IDs according to the given sequence(s).

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
    sequence pair mask has the following format:
    ```
    0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
    | first sequence    | second sequence |
    ```

    If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
    """
    sep = [self.sep_token_id]
    cls_segment_id = [2]

    if token_ids_1 is None:
        return len(token_ids_0 + sep) * [0] + cls_segment_id
    return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
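
Segment IDs are assigned per sequence: 0 for the first sequence and its `<sep>`, 1 for the second sequence and its `<sep>`, and the dedicated segment ID 2 for the trailing `<cls>`. A sketch with hypothetical inputs (`sep` uses a made-up ID):

```python
sep = [4]             # hypothetical sep_token_id
cls_segment_id = [2]  # XLNet reserves segment id 2 for <cls>

token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]

pair_mask = len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
print(pair_mask)  # [0, 0, 0, 0, 1, 1, 1, 2]
```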

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_special_tokens_mask(token_ids_0, token_ids_1=None, already_has_special_tokens=False)

Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer prepare_for_model method.

PARAMETER DESCRIPTION
token_ids_0

List of IDs.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

already_has_special_tokens

Whether or not the token list is already formatted with special tokens for the model.

TYPE: `bool`, *optional*, defaults to `False` DEFAULT: False

RETURNS DESCRIPTION
List[int]

List[int]: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False
) -> List[int]:
    """
    Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
    special tokens using the tokenizer `prepare_for_model` method.

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the token list is already formatted with special tokens for the model.

    Returns:
        `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )

    if token_ids_1 is not None:
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
    return ([0] * len(token_ids_0)) + [1, 1]
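
For unpadded inputs the mask flags only the two `<sep>` positions and the final `<cls>`; everything else is an ordinary sequence token. A sketch reusing the hypothetical sequences from the earlier examples:

```python
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]

# A <sep> B <sep> <cls>  ->  sequence tokens are 0, special tokens are 1
pair_mask = ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
print(pair_mask)  # [0, 0, 0, 1, 0, 0, 1, 1]
```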

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_vocab()

Returns the vocabulary of the XLNetTokenizer.

PARAMETER DESCRIPTION
self

The instance of the XLNetTokenizer class.

RETURNS DESCRIPTION
dict

A dictionary containing the vocabulary of the XLNetTokenizer. The keys of the dictionary are the tokens, and the values are their corresponding indices.

Example
>>> tokenizer = XLNetTokenizer()
>>> tokenizer.get_vocab()
{'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, '<mask>': 4, 'hello': 5, 'world': 6}
Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def get_vocab(self):
    """
    Returns the vocabulary of the XLNetTokenizer.

    Args:
        self: The instance of the XLNetTokenizer class.

    Returns:
        dict:
            A dictionary containing the vocabulary of the XLNetTokenizer.
            The keys of the dictionary are the tokens, and the values are their corresponding indices.

    Raises:
        None.

    Example:
        ```python
        >>> tokenizer = XLNetTokenizer()
        >>> tokenizer.get_vocab()
        {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, '<mask>': 4, 'hello': 5, 'world': 6}
        ```
    """
    vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
    vocab.update(self.added_tokens_encoder)
    return vocab

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.preprocess_text(inputs)

This method preprocesses the input text according to the specified configuration settings.

PARAMETER DESCRIPTION
self

The instance of the XLNetTokenizer class.

TYPE: XLNetTokenizer

inputs

The input text to be preprocessed. It should be a string representation.

TYPE: str

RETURNS DESCRIPTION
str

The preprocessed text based on the applied configuration settings.

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def preprocess_text(self, inputs):
    """
    This method preprocesses the input text according to the specified configuration settings.

    Args:
        self (XLNetTokenizer): The instance of the XLNetTokenizer class.
        inputs (str): The input text to be preprocessed. It should be a string representation.

    Returns:
        str: The preprocessed text based on the applied configuration settings.

    Raises:
        None
    """
    if self.remove_space:
        outputs = " ".join(inputs.strip().split())
    else:
        outputs = inputs
    outputs = outputs.replace("``", '"').replace("''", '"')

    if not self.keep_accents:
        outputs = unicodedata.normalize("NFKD", outputs)
        outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
    if self.do_lower_case:
        outputs = outputs.lower()

    return outputs
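
The preprocessing relies only on the standard library, so its effect is easy to verify in isolation. A functional sketch that mirrors the steps above (whitespace cleanup, quote normalization, optional NFKD accent stripping and lowercasing):

```python
import unicodedata

def preprocess(text, remove_space=True, keep_accents=False, do_lower_case=False):
    """Standalone mirror of XLNetTokenizer.preprocess_text."""
    outputs = " ".join(text.strip().split()) if remove_space else text
    outputs = outputs.replace("``", '"').replace("''", '"')  # normalize LaTeX-style quotes
    if not keep_accents:
        outputs = unicodedata.normalize("NFKD", outputs)     # decompose accented characters
        outputs = "".join(c for c in outputs if not unicodedata.combining(c))
    if do_lower_case:
        outputs = outputs.lower()
    return outputs

print(preprocess("  Héllo   ``world''  ", do_lower_case=True))  # hello "world"
```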

mindnlp.transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.save_vocabulary(save_directory, filename_prefix=None)

Save the vocabulary of the XLNetTokenizer.

PARAMETER DESCRIPTION
self

An instance of the XLNetTokenizer class.

TYPE: XLNetTokenizer

save_directory

The directory path where the vocabulary will be saved.

TYPE: str

filename_prefix

An optional prefix for the filename of the saved vocabulary. Defaults to None.

TYPE: Optional[str] DEFAULT: None

RETURNS DESCRIPTION
Tuple[str]

Tuple[str]: A tuple containing the path to the saved vocabulary file.

RAISES DESCRIPTION
FileNotFoundError

If the specified save_directory does not exist.

PermissionError

If the specified save_directory is not accessible for writing.

Note
  • The saved vocabulary file will be named '<filename_prefix>-vocab.txt' if filename_prefix is provided, otherwise 'vocab.txt'.
  • If the provided save_directory is the same as the current vocabulary file's directory and the vocabulary file already exists, it will be copied to the save_directory.
  • If the current vocabulary file does not exist, a new vocabulary file will be created in the save_directory using the serialized model from the sp_model attribute of the tokenizer.
Example
>>> tokenizer = XLNetTokenizer()
>>> save_dir = '/path/to/save'
>>> prefix = 'english'
>>> vocab_file = tokenizer.save_vocabulary(save_dir, prefix)
>>> print(f"Vocabulary saved at: {vocab_file}")
Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet.py
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """
    Save the vocabulary of the XLNetTokenizer.

    Args:
        self (XLNetTokenizer): An instance of the XLNetTokenizer class.
        save_directory (str): The directory path where the vocabulary will be saved.
        filename_prefix (Optional[str]): An optional prefix for the filename of the saved vocabulary.
            Defaults to None.

    Returns:
        Tuple[str]: A tuple containing the path to the saved vocabulary file.

    Raises:
        FileNotFoundError: If the specified save_directory does not exist.
        PermissionError: If the specified save_directory is not accessible for writing.

    Note:
        - The saved vocabulary file will be named as per the following format:
        '<filename_prefix>-vocab.txt' if filename_prefix is provided, otherwise 'vocab.txt'.
        - If the provided save_directory is the same as the current vocabulary file's directory and
        the vocabulary file already exists, it will be copied to the save_directory.
        - If the current vocabulary file does not exist, a new vocabulary file will be created in the
        save_directory using the serialized model from the sp_model attribute of the tokenizer.

    Example:
        ```python
        >>> tokenizer = XLNetTokenizer()
        >>> save_dir = '/path/to/save'
        >>> prefix = 'english'
        >>> vocab_file = tokenizer.save_vocabulary(save_dir, prefix)
        >>> print(f"Vocabulary saved at: {vocab_file}")
        ```
    """
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
    )

    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    elif not os.path.isfile(self.vocab_file):
        with open(out_vocab_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)

    return (out_vocab_file,)

mindnlp.transformers.models.xlnet.tokenization_xlnet_fast

Tokenization classes for XLNet model.

mindnlp.transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast

Bases: PreTrainedTokenizerFast

Construct a "fast" XLNet tokenizer (backed by HuggingFace's tokenizers library). Based on Unigram.

This tokenizer inherits from [PreTrainedTokenizerFast] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods.

PARAMETER DESCRIPTION
vocab_file

SentencePiece file (generally has a .spm extension) that contains the vocabulary necessary to instantiate a tokenizer.

TYPE: `str` DEFAULT: None

do_lower_case

Whether to lowercase the input when tokenizing.

TYPE: `bool`, *optional*, defaults to `False` DEFAULT: False

remove_space

Whether to strip the text when tokenizing (removing excess spaces before and after the string).

TYPE: `bool`, *optional*, defaults to `True` DEFAULT: True

keep_accents

Whether to keep accents when tokenizing.

TYPE: `bool`, *optional*, defaults to `False` DEFAULT: False

bos_token

The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.

When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the cls_token.

TYPE: `str`, *optional*, defaults to `"<s>"` DEFAULT: '<s>'

eos_token

The end of sequence token.

When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the sep_token.

TYPE: `str`, *optional*, defaults to `"</s>"` DEFAULT: '</s>'

unk_token

The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead.

TYPE: `str`, *optional*, defaults to `"<unk>"` DEFAULT: '<unk>'

sep_token

The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens.

TYPE: `str`, *optional*, defaults to `"<sep>"` DEFAULT: '<sep>'

pad_token

The token used for padding, for example when batching sequences of different lengths.

TYPE: `str`, *optional*, defaults to `"<pad>"` DEFAULT: '<pad>'

cls_token

The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens.

TYPE: `str`, *optional*, defaults to `"<cls>"` DEFAULT: '<cls>'

mask_token

The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict.

TYPE: `str`, *optional*, defaults to `"<mask>"` DEFAULT: '<mask>'

additional_special_tokens

Additional special tokens used by the tokenizer.

TYPE: `List[str]`, *optional*, defaults to `["<eop>", "<eod>"]` DEFAULT: ['<eop>', '<eod>']

ATTRIBUTE DESCRIPTION
sp_model

The SentencePiece processor that is used for every conversion (string, tokens and IDs).

TYPE: `SentencePieceProcessor`

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet_fast.py
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" XLNet tokenizer (backed by HuggingFace's *tokenizers* library). Based on
    [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether to lowercase the input when tokenizing.
        remove_space (`bool`, *optional*, defaults to `True`):
            Whether to strip the text when tokenizing (removing excess spaces before and after the string).
        keep_accents (`bool`, *optional*, defaults to `False`):
            Whether to keep accents when tokenizing.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"<sep>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"<cls>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
            Additional special tokens used by the tokenizer.

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        """
        __init__

        Initializes an instance of the XLNetTokenizerFast class.

        Args:
            self: The instance of the class.
            vocab_file (str, optional): The path to the vocabulary file. Defaults to None.
            tokenizer_file (str, optional): The path to the tokenizer file. Defaults to None.
            do_lower_case (bool, optional): Whether to convert tokens to lowercase. Defaults to False.
            remove_space (bool, optional): Whether to remove spaces from tokens. Defaults to True.
            keep_accents (bool, optional): Whether to keep accents in tokens. Defaults to False.
            bos_token (str, optional): The beginning of sentence token. Defaults to '<s>'.
            eos_token (str, optional): The end of sentence token. Defaults to '</s>'.
            unk_token (str, optional): The unknown token. Defaults to '<unk>'.
            sep_token (str, optional): The separator token. Defaults to '<sep>'.
            pad_token (str, optional): The padding token. Defaults to '<pad>'.
            cls_token (str, optional): The classification token. Defaults to '<cls>'.
            mask_token (str or AddedToken, optional): The mask token. Defaults to '<mask>'.
            additional_special_tokens (list, optional): Additional special tokens. Defaults to ['<eop>', '<eod>'].
            **kwargs: Additional keyword arguments.

        Returns:
            None.

        Raises:
            ValueError: If invalid input is provided for the parameters.
            TypeError: If the input type for the parameters is incorrect.
        """
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        """
        Checks if the slow tokenizer can be saved.

        Args:
            self (XLNetTokenizerFast): An instance of the XLNetTokenizerFast class.

        Returns:
            bool: Returns True if the slow tokenizer can be saved, False otherwise.

        Raises:
            None.

        The 'can_save_slow_tokenizer' method checks if the slow tokenizer can be saved by verifying the existence of
        the vocabulary file. It returns a boolean value indicating whether the slow tokenizer can be saved or not.
        If the 'vocab_file' attribute is not set or if the file does not exist, the method returns False. Otherwise,
        it returns True.

        Note that this method does not raise any exceptions.
        """
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens. An XLNet sequence has the following format:

        - single sequence: `X <sep> <cls>`
        - pair of sequences: `A <sep> B <sep> <cls>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the vocabulary for a fast tokenizer to a specified directory.

        Args:
            self (XLNetTokenizerFast): The instance of the XLNetTokenizerFast class.
            save_directory (str): The directory path where the vocabulary will be saved.
            filename_prefix (Optional[str]): A prefix for the filename. Defaults to None. 

        Returns:
            Tuple[str]: A tuple containing the path to the saved vocabulary file.

        Raises:
            ValueError: If the fast tokenizer does not have the necessary information to save the vocabulary for
                a slow tokenizer.
            OSError: If the save_directory does not exist or is not a valid directory.
            IOError: If an error occurs while copying the vocabulary file to the specified directory.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
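
Note the class attributes `padding_side = "left"` and `_pad_token_type_id = 3`: XLNet reads the representation of the last token, so batches are padded on the left and padded positions receive segment ID 3. A usage sketch, assuming a checkpoint name such as 'xlnet-base-cased' can be resolved by `from_pretrained`:

```python
from mindnlp.transformers.models.xlnet.tokenization_xlnet_fast import XLNetTokenizerFast

# 'xlnet-base-cased' is an assumed checkpoint name; any XLNet checkpoint works here.
tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")

batch = tokenizer(
    ["A short sentence.", "A noticeably longer second sentence."],
    padding=True,
)
print(tokenizer.padding_side)         # 'left' -- padding is prepended, not appended
print(batch["input_ids"][0])          # shorter sequence, left-padded to the batch length
print(batch["token_type_ids"][0])     # padded positions carry segment id 3
```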

mindnlp.transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.can_save_slow_tokenizer: bool property

Checks if the slow tokenizer can be saved.

PARAMETER DESCRIPTION
self

An instance of the XLNetTokenizerFast class.

TYPE: XLNetTokenizerFast

RETURNS DESCRIPTION
bool

Returns True if the slow tokenizer can be saved, False otherwise.

TYPE: bool

The 'can_save_slow_tokenizer' method checks if the slow tokenizer can be saved by verifying the existence of the vocabulary file. It returns a boolean value indicating whether the slow tokenizer can be saved or not. If the 'vocab_file' attribute is not set or if the file does not exist, the method returns False. Otherwise, it returns True.

Note that this method does not raise any exceptions.

mindnlp.transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.__init__(vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], **kwargs)

init

Initializes an instance of the XLNetTokenizerFast class.

PARAMETER DESCRIPTION
self

The instance of the class.

vocab_file

The path to the vocabulary file. Defaults to None.

TYPE: str DEFAULT: None

tokenizer_file

The path to the tokenizer file. Defaults to None.

TYPE: str DEFAULT: None

do_lower_case

Whether to convert tokens to lowercase. Defaults to False.

TYPE: bool DEFAULT: False

remove_space

Whether to remove spaces from tokens. Defaults to True.

TYPE: bool DEFAULT: True

keep_accents

Whether to keep accents in tokens. Defaults to False.

TYPE: bool DEFAULT: False

bos_token

The beginning of sentence token. Defaults to '<s>'.

TYPE: str DEFAULT: '<s>'

eos_token

The end of sentence token. Defaults to '</s>'.

TYPE: str DEFAULT: '</s>'

unk_token

The unknown token. Defaults to '<unk>'.

TYPE: str DEFAULT: '<unk>'

sep_token

The separator token. Defaults to '<sep>'.

TYPE: str DEFAULT: '<sep>'

pad_token

The padding token. Defaults to '<pad>'.

TYPE: str DEFAULT: '<pad>'

cls_token

The classification token. Defaults to '<cls>'.

TYPE: str DEFAULT: '<cls>'

mask_token

The mask token. Defaults to '<mask>'.

TYPE: str or AddedToken DEFAULT: '<mask>'

additional_special_tokens

Additional special tokens. Defaults to ['<eop>', '<eod>'].

TYPE: list DEFAULT: ['<eop>', '<eod>']

**kwargs

Additional keyword arguments.

DEFAULT: {}

RETURNS DESCRIPTION

None.

RAISES DESCRIPTION
ValueError

If invalid input is provided for the parameters.

TypeError

If the input type for the parameters is incorrect.

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet_fast.py
def __init__(
    self,
    vocab_file=None,
    tokenizer_file=None,
    do_lower_case=False,
    remove_space=True,
    keep_accents=False,
    bos_token="<s>",
    eos_token="</s>",
    unk_token="<unk>",
    sep_token="<sep>",
    pad_token="<pad>",
    cls_token="<cls>",
    mask_token="<mask>",
    additional_special_tokens=["<eop>", "<eod>"],
    **kwargs,
):
    """
    __init__

    Initializes an instance of the XLNetTokenizerFast class.

    Args:
        self: The instance of the class.
        vocab_file (str, optional): The path to the vocabulary file. Defaults to None.
        tokenizer_file (str, optional): The path to the tokenizer file. Defaults to None.
        do_lower_case (bool, optional): Whether to convert tokens to lowercase. Defaults to False.
        remove_space (bool, optional): Whether to remove spaces from tokens. Defaults to True.
        keep_accents (bool, optional): Whether to keep accents in tokens. Defaults to False.
        bos_token (str, optional): The beginning of sentence token. Defaults to '<s>'.
        eos_token (str, optional): The end of sentence token. Defaults to '</s>'.
        unk_token (str, optional): The unknown token. Defaults to '<unk>'.
        sep_token (str, optional): The separator token. Defaults to '<sep>'.
        pad_token (str, optional): The padding token. Defaults to '<pad>'.
        cls_token (str, optional): The classification token. Defaults to '<cls>'.
        mask_token (str or AddedToken, optional): The mask token. Defaults to '<mask>'.
        additional_special_tokens (list, optional): Additional special tokens. Defaults to ['<eop>', '<eod>'].
        **kwargs: Additional keyword arguments.

    Returns:
        None.

    Raises:
        ValueError: If invalid input is provided for the parameters.
        TypeError: If the input type for the parameters is incorrect.
    """
    # Mask token behave like a normal word, i.e. include the space before it
    mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

    super().__init__(
        vocab_file=vocab_file,
        tokenizer_file=tokenizer_file,
        do_lower_case=do_lower_case,
        remove_space=remove_space,
        keep_accents=keep_accents,
        bos_token=bos_token,
        eos_token=eos_token,
        unk_token=unk_token,
        sep_token=sep_token,
        pad_token=pad_token,
        cls_token=cls_token,
        mask_token=mask_token,
        additional_special_tokens=additional_special_tokens,
        **kwargs,
    )

    self._pad_token_type_id = 3
    self.do_lower_case = do_lower_case
    self.remove_space = remove_space
    self.keep_accents = keep_accents
    self.vocab_file = vocab_file

mindnlp.transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.build_inputs_with_special_tokens(token_ids_0, token_ids_1=None)

Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and adding special tokens. An XLNet sequence has the following format:

  • single sequence: X <sep> <cls>
  • pair of sequences: A <sep> B <sep> <cls>
PARAMETER DESCRIPTION
token_ids_0

List of IDs to which the special tokens will be added.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

RETURNS DESCRIPTION
List[int]

List[int]: List of input IDs with the appropriate special tokens.

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet_fast.py
def build_inputs_with_special_tokens(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
    adding special tokens. An XLNet sequence has the following format:

    - single sequence: `X <sep> <cls>`
    - pair of sequences: `A <sep> B <sep> <cls>`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return token_ids_0 + sep + cls
    return token_ids_0 + sep + token_ids_1 + sep + cls

mindnlp.transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.create_token_type_ids_from_sequences(token_ids_0, token_ids_1=None)

Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet sequence pair mask has the following format:

0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence    | second sequence |

If token_ids_1 is None, this method only returns the first portion of the mask (0s).

PARAMETER DESCRIPTION
token_ids_0

List of IDs.

TYPE: `List[int]`

token_ids_1

Optional second list of IDs for sequence pairs.

TYPE: `List[int]`, *optional* DEFAULT: None

RETURNS DESCRIPTION
List[int]

List[int]: List of token type IDs according to the given sequence(s).

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet_fast.py
def create_token_type_ids_from_sequences(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
    sequence pair mask has the following format:

    ```
    0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
    | first sequence    | second sequence |
    ```

    If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
    """
    sep = [self.sep_token_id]
    cls_segment_id = [2]

    if token_ids_1 is None:
        return len(token_ids_0 + sep) * [0] + cls_segment_id
    return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

mindnlp.transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.save_vocabulary(save_directory, filename_prefix=None)

Save the vocabulary for a fast tokenizer to a specified directory.

PARAMETER DESCRIPTION
self

The instance of the XLNetTokenizerFast class.

TYPE: XLNetTokenizerFast

save_directory

The directory path where the vocabulary will be saved.

TYPE: str

filename_prefix

A prefix for the filename. Defaults to None.

TYPE: Optional[str] DEFAULT: None

RETURNS DESCRIPTION
Tuple[str]

Tuple[str]: A tuple containing the path to the saved vocabulary file.

RAISES DESCRIPTION
ValueError

If the fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.

OSError

If the save_directory does not exist or is not a valid directory.

IOError

If an error occurs while copying the vocabulary file to the specified directory.

Source code in mindnlp\transformers\models\xlnet\tokenization_xlnet_fast.py
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """Save the vocabulary for a fast tokenizer to a specified directory.

    Args:
        self (XLNetTokenizerFast): The instance of the XLNetTokenizerFast class.
        save_directory (str): The directory path where the vocabulary will be saved.
        filename_prefix (Optional[str]): A prefix for the filename. Defaults to None. 

    Returns:
        Tuple[str]: A tuple containing the path to the saved vocabulary file.

    Raises:
        ValueError: If the fast tokenizer does not have the necessary information to save the vocabulary for
            a slow tokenizer.
        OSError: If the save_directory does not exist or is not a valid directory.
        IOError: If an error occurs while copying the vocabulary file to the specified directory.
    """
    if not self.can_save_slow_tokenizer:
        raise ValueError(
            "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
            "tokenizer."
        )

    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
    )

    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
        copyfile(self.vocab_file, out_vocab_file)

    return (out_vocab_file,)
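
`save_vocabulary` only succeeds when the fast tokenizer still knows where its SentencePiece file lives (`can_save_slow_tokenizer` is True); otherwise it raises a `ValueError`. A closing sketch under the same checkpoint assumption as above, with a hypothetical output directory:

```python
import os

from mindnlp.transformers.models.xlnet.tokenization_xlnet_fast import XLNetTokenizerFast

tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")  # assumed checkpoint
save_dir = "/tmp/xlnet_vocab"                                       # hypothetical directory
os.makedirs(save_dir, exist_ok=True)

if tokenizer.can_save_slow_tokenizer:  # True only when the SentencePiece file is on disk
    (vocab_path,) = tokenizer.save_vocabulary(save_dir, filename_prefix="my")
    print(vocab_path)                  # the copied vocabulary file inside save_dir
else:
    print("No SentencePiece file available; cannot export a slow-tokenizer vocabulary.")
```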