B 3gd@sdZddlZddlZddlZddlZddlZddlmZddlm Z ddl m Z m Z m Z mZmZmZmZmZddlZddlmZddlmZdd lmZmZmZmZmZmZm Z erddl!Z"erddl#Z#e$e%Z&e'd Z(e'd Z)e*Z+e e*Z,e e'Z-ee*e*fZ.ee e*e e*fZ/ee e'e e'fZ0d Z1d Z2dZ3dZ4Gddde Z5Gddde5Z6Gddde5Z7Gddde5Z8GdddeZ9GdddeZ:GdddeZ;GdddZGd"d#d#e<Z?dS)$a0 Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (host all the user fronting encoding methodes) Special token mixing (host the special tokens logic) and BatchEncoding (wrap the dictionnary of output with special method for the Fast tokenizers) N)UserDict)Enum)AnyDictList NamedTupleOptionalSequenceTupleUnion) AddedToken)Encoding)add_end_docstrings cached_path hf_bucket_url is_remote_urlis_tf_availableis_torch_availabletorch_requiredgꌠ9Y>)Fg@xDzspecial_tokens_map.jsonzadded_tokens.jsonztokenizer_config.jsonztokenizer.jsonc@seZdZdZeddZdS) ExplicitEnumz? Enum with more explicit error message for missing values. cCs&td||jtt|jfdS)Nz-%r is not a valid %s, please select one of %s) ValueError__name__strlist_value2member_map_keys)clsvaluer7/cluster/home2/cyx/elia/bert/tokenization_utils_base.py _missing_NszExplicitEnum._missing_N)r __module__ __qualname____doc__ classmethodr!rrrr rJsrc@seZdZdZdZdZdZdS)TruncationStrategyZ only_firstZ only_second longest_firstdo_not_truncateN)rr"r# ONLY_FIRST ONLY_SECOND LONGEST_FIRSTDO_NOT_TRUNCATErrrr r&Vsr&c@seZdZdZdZdZdS)PaddingStrategylongest max_lengthZ do_not_padN)rr"r#LONGEST MAX_LENGTH DO_NOT_PADrrrr r-]sr-c@seZdZdZdZdZdS) TensorTypepttfnpN)rr"r#PYTORCH TENSORFLOWNUMPYrrrr r3csr3c@s"eZdZUdZeed<eed<dS)CharSpanz Character span in the original string Args: start: index of the first character in the original string end: index of the character following the last character in the original string startendN)rr"r#r$int__annotations__rrrr r:is r:c@s"eZdZUdZeed<eed<dS) TokenSpanz Token span in an encoded string (list of tokens) Args: start: index of the first token in the span end: index of the token following the last token in the span r;r<N)rr"r#r$r=r>rrrr r?us r?cseZdZdZd7eeeefeee e e fedee fe dfdd Z eddZeeefe d d d Zed d dZddZddZddZddZddZeeee dddZd8eeedddZd9eeeedd d!Zd:eeeed"d#d$Zd;eeeed%d&d'Zdeeeed%d-d.Z"d?eeeed*d/d0Z#d@edee fe d1d2d3Z$e%ed4d5d6Z&Z'S)A BatchEncodinga BatchEncoding hold the output of the encode and batch_encode methods (tokens, attention_masks, etc). This class is derived from a python Dictionary and can be used as a dictionnary. In addition, this class expose utility methods to map from word/char space to token space. Args: data (:obj:`dict`): Dictionary of lists/arrays returned by the encode/batch_encode methods ('input_ids', 'attention_mask'...) encoding (:obj:`EncodingFast`, :obj:`list(EncodingFast)`, `optional`, defaults to :obj:`None`): If the tokenizer is a fast tokenizer which outputs additional informations like mapping from word/char space to token space the `EncodingFast` instance or list of instance (for batches) hold these informations. tensor_type (:obj:`Union[None, str, TensorType]`, `optional`, defaults to :obj:`None`): You can give a tensor_type here to convert the lists of integers in PyTorch/TF/Numpy Tensors at initialization prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to add a batch axis when converting in Tensors (see :obj:`tensor_type` above) NF)dataencoding tensor_typeprepend_batch_axiscs4t|t|tr|g}||_|j||ddS)N)rCrD)super__init__ isinstance EncodingFast _encodingsconvert_to_tensors)selfrArBrCrD) __class__rr rFs   zBatchEncoding.__init__cCs |jdk S)z Indicate if this BatchEncoding was generated from the result of a PreTrainedTokenizerFast Returns: True if generated from subclasses of PreTrainedTokenizerFast, else otherwise N)rI)rKrrr is_fastszBatchEncoding.is_fast)itemreturncCs4t|tr|j|S|jdk r(|j|StddS)z If the key is a string, get the value of the dict associated to `key` ('input_ids', 'attention_mask'...) If the key is an integer, get the EncodingFast for batch item with index `key` NzIndexing with integers (to access backend Encoding for a given batch index) is not available when using Python based tokenizers)rGrrArIKeyError)rKrNrrr __getitem__s     zBatchEncoding.__getitem__)rNcCs(y |j|Stk r"tYnXdS)N)rArPAttributeError)rKrNrrr __getattr__s zBatchEncoding.__getattr__cCs|j|jdS)N)rA encodings)rArI)rKrrr __getstate__szBatchEncoding.__getstate__cCs(d|kr|d|_d|kr$|d|_dS)NrArT)rArI)rKstaterrr __setstate__s zBatchEncoding.__setstate__cCs |jS)N)rAr)rKrrr rszBatchEncoding.keyscCs |jS)N)rAvalues)rKrrr rXszBatchEncoding.valuescCs |jS)N)rAitems)rKrrr rYszBatchEncoding.items)rOcCs|jS)z Return the list all encoding from the tokenization process Returns: List[EncodingFast] or None if input was tokenized through Python (i.e. not fast) tokenizer )rI)rKrrr rTszBatchEncoding.encodingsr) batch_indexrOcCs|jstd|j|jS)Nz sz$BatchEncoding.to..)rArY)rKrvr)rvr rw szBatchEncoding.to)NNNF)r)r)N)N)N)N)N)N)F)(rr"r#r$rrrrr rHr r3boolrFpropertyrMr=rQrSrUrWrrXrYrrTr[r\rar?rdr:rerhrirjrJrrw __classcell__rr)rLr r@s4@ )1)$)%-r@c@seZdZdZdddddddd gZdLd d Zed ddZee e e e ffedddZ dMe e e e e e e fedddZeddZeddZeddZeddZedd Zed!d"Zed#d$Zed%d&Zejd'dZejd(dZejd)dZejd*dZejd+d Zejd,d"Zejd-d$Zejd.d&Zed/d0Zed1d2Zed3d4Zed5d6Zed7d8Zed9d:Zed;d<Zed=d>Z ed?d@Z!edAdBZ"edCdDZ#edEdFZ$edGdHZ%edIdJZ&dKS)NSpecialTokensMixinai SpecialTokensMixin is derived by ``PreTrainedTokenizer`` and ``PreTrainedTokenizerFast`` and handles specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access to these special tokens in a model-independant manner and allow to set and update the special tokens. bos_token eos_token unk_token sep_token pad_token cls_token mask_tokenadditional_special_tokensTcKsd|_d|_d|_d|_d|_d|_d|_d|_g|_||_ x| D]|\}}||j krF|dkrt |t tfrtdd|Dstt|||qFt |ttfrt|||qFtd|t|qFWdS)Nrrcss|]}t|tVqdS)N)rGr)rxtrrr Ysz.SpecialTokensMixin.__init__..z?special token {} has to be either str or AddedToken but got: {}) _bos_token _eos_token _unk_token _sep_token _pad_token _cls_token _mask_token_pad_token_type_id_additional_special_tokensverboserYSPECIAL_TOKENS_ATTRIBUTESrGrtupleallAssertionErrorsetattrrr TypeErrorrqtype)rKrkwargsrurrrr rFGs& $zSpecialTokensMixin.__init__)rOcCs|j|jddS)a4 Make sure that all the special tokens attributes of the tokenizer (tokenizer.mask_token, tokenizer.cls_token, ...) are in the vocabulary. Add the missing ones to the vocabulary if needed. Return: Number of tokens added in the vocaulary during the operation. T)special_tokens) add_tokensall_special_tokens_extended)rKrrr sanitize_special_tokensbsz*SpecialTokensMixin.sanitize_special_tokens)special_tokens_dictrOcCs|sdSd}x|D]\}}||jks,t|jr@td||t||||dkrt|tt frtt dd|Dstd|d|d||j |d d 7}qt|t t fstd |d|d ||j |gd d 7}qW|S) a Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary). Using `add_special_tokens` will ensure your special tokens can be used in several ways: - special tokens are carefully handled by the tokenizer (they are never split) - you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts. When possible, special tokens are already registered for provided pretrained models (ex: BertTokenizer cls_token is already registered to be '[CLS]' and XLM's one is also registered to be '') Args: special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes: [``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``]. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them). Returns: Number of tokens added to the vocabulary. Examples:: # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') special_tokens_dict = {'cls_token': ''} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print('We have added', num_added_toks, 'tokens') model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer. assert tokenizer.cls_token == '' rz+Assigning %s to the %s key of the tokenizerrcss|]}t|ttfVqdS)N)rGrr )rxrrrr rsz8SpecialTokensMixin.add_special_tokens..zTokens z for key z* should all be str or AddedToken instancesT)rzToken z* should be a str or an AddedToken instance)rYrrrloggerinforrGrrrrrr )rKr added_tokensrurrrr add_special_tokensks$% z%SpecialTokensMixin.add_special_tokensF) new_tokensrOcCs*|sdSt|ttfs|g}|j||dS)a Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary. Args: new_tokens: string or list of string or :class:`~transformers.AddedToken`. Each string is a token to add. Tokens are only added if they are not already in the vocabulary. AddedToken wrap a string token to let you personnalize it's behavior (Whether this token should only match against single word, whether this token should strip all potential whitespaces on the left side, Whether this token should strip all potential whitespaces on the right side...). special_token: can be used to specify if the token is a special token. This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance) See details for :class:`~transformers.AddedToken` in HuggingFace tokenizers library. Returns: Number of tokens added to the vocabulary. Examples:: # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2']) print('We have added', num_added_toks, 'tokens') model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer. r)r)rGrr _add_tokens)rKrrrrr rs zSpecialTokensMixin.add_tokenscCs(|jdkr|jrtddSt|jS)zW Beginning of sentence token (string). Log an error if used while not having been set. Nz'Using bos_token, but it is not set yet.)rrrerrorr)rKrrr rs zSpecialTokensMixin.bos_tokencCs(|jdkr|jrtddSt|jS)zQ End of sentence token (string). Log an error if used while not having been set. Nz'Using eos_token, but it is not set yet.)rrrrr)rKrrr rs zSpecialTokensMixin.eos_tokencCs(|jdkr|jrtddSt|jS)zI Unknown token (string). Log an error if used while not having been set. Nz'Using unk_token, but it is not set yet.)rrrrr)rKrrr rs zSpecialTokensMixin.unk_tokencCs(|jdkr|jrtddSt|jS)z Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. Nz'Using sep_token, but it is not set yet.)rrrrr)rKrrr rs zSpecialTokensMixin.sep_tokencCs(|jdkr|jrtddSt|jS)zI Padding token (string). Log an error if used while not having been set. Nz'Using pad_token, but it is not set yet.)rrrrr)rKrrr rs zSpecialTokensMixin.pad_tokencCs(|jdkr|jrtddSt|jS)z Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. Nz'Using cls_token, but it is not set yet.)rrrrr)rKrrr rs zSpecialTokensMixin.cls_tokencCs(|jdkr|jrtddSt|jS)z Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. Nz(Using mask_token, but it is not set yet.)rrrrr)rKrrr rs zSpecialTokensMixin.mask_tokencCs.|jdkr|jrtddSdd|jDS)zz All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. Nz7Using additional_special_tokens, but it is not set yet.cSsg|] }t|qSr)r)rxtokrrr sz@SpecialTokensMixin.additional_special_tokens..)rrrr)rKrrr rs z,SpecialTokensMixin.additional_special_tokenscCs ||_dS)N)r)rKrrrr r scCs ||_dS)N)r)rKrrrr rscCs ||_dS)N)r)rKrrrr rscCs ||_dS)N)r)rKrrrr rscCs ||_dS)N)r)rKrrrr rscCs ||_dS)N)r)rKrrrr r!scCs ||_dS)N)r)rKrrrr r%scCs ||_dS)N)r)rKrrrr r)scCs|jdkrdS||jS)zj Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. N)rconvert_tokens_to_idsr)rKrrr bos_token_id-s zSpecialTokensMixin.bos_token_idcCs|jdkrdS||jS)zd Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. N)rrr)rKrrr eos_token_id4s zSpecialTokensMixin.eos_token_idcCs|jdkrdS||jS)z\ Id of the unknown token in the vocabulary. Log an error if used while not having been set. N)rrr)rKrrr unk_token_id;s zSpecialTokensMixin.unk_token_idcCs|jdkrdS||jS)z Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. N)rrr)rKrrr sep_token_idBs zSpecialTokensMixin.sep_token_idcCs|jdkrdS||jS)z\ Id of the padding token in the vocabulary. Log an error if used while not having been set. N)rrr)rKrrr pad_token_idIs zSpecialTokensMixin.pad_token_idcCs|jS)z0 Id of the padding token type in the vocabulary.)r)rKrrr pad_token_type_idPsz$SpecialTokensMixin.pad_token_type_idcCs|jdkrdS||jS)z Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. N)rrr)rKrrr cls_token_idUs zSpecialTokensMixin.cls_token_idcCs|jdkrdS||jS)z Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. N)rrr)rKrrr mask_token_id\s z SpecialTokensMixin.mask_token_idcCs ||jS)z Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. )rr)rKrrr additional_special_tokens_idscsz0SpecialTokensMixin.additional_special_tokens_idscCs6i}x,|jD]"}t|d|}|r t|||<q W|S)z A dictionary mapping special token class attribute (cls_token, unk_token...) to their values ('', ''...) Convert tokens of AddedToken type in string. All returned tokens are strings _)rgetattrr)rKset_attrattr attr_valuerrr special_tokens_maphs  z%SpecialTokensMixin.special_tokens_mapcCs2i}x(|jD]}t|d|}|r |||<q W|S)a) A dictionary mapping special token class attribute (cls_token, unk_token...) to their values ('', ''...) Keep the tokens as AddedToken if they are of this type. AddedToken can be used to control more finely how special tokens are tokenized. r)rr)rKrrrrrr special_tokens_map_extendedvs   z.SpecialTokensMixin.special_tokens_map_extendedcCsdd|jD}|S)z List all the special tokens ('', ''...) mapped to class attributes Convert tokens of AddedToken type in string. All returned tokens are strings (cls_token, unk_token...). cSsg|] }t|qSr)r)rxsrrr rsz9SpecialTokensMixin.all_special_tokens..)r)rKall_toksrrr all_special_tokenssz%SpecialTokensMixin.all_special_tokenscCsLg}|j}x0|D]$}|t|ttfr0t|n|g}qWtt|}|S)z List all the special tokens ('', ''...) mapped to class attributes Keep the tokens as AddedToken if they are of this type. AddedToken can be used to control more finely how special tokens are tokenized. )rrXrGrrset)rKrrrrrr rs $ z.SpecialTokensMixin.all_special_tokens_extendedcCs|j}||}|S)z List the vocabulary indices of the special tokens ('', ''...) mapped to class attributes (cls_token, unk_token...). )rr)rKrZall_idsrrr all_special_idss z"SpecialTokensMixin.all_special_idsN)T)F)'rr"r#r$rrFr=rrrr r rrrr}rrrrrrrrsetterrrrrrrrrrrrrrrrrrr r5sV  =&%                    ra add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`): If set to ``True``, the sequences will be encoded with the special tokens relative to their model. `padding` (:obj:`Union[bool, str]`, `optional`, defaults to :obj:`False`): Activate and control padding. Accepts the following values: * `True` or `'longest'`: pad to the longest sequence in the batch (or no padding if only a single sequence if provided), * `'max_length'`: pad to a max length specified in `max_length` or to the max acceptable input length for the model if no length is provided (`max_length=None`) * `False` or `'do_not_pad'` (default): No padding (i.e. can output batch with sequences of uneven lengths) `truncation` (:obj:`Union[bool, str]`, `optional`, defaults to :obj:`False`): Activate and control truncation. Accepts the following values: * `True` or `'longest_first'`: truncate to a max length specified in `max_length` or to the max acceptable input length for the model if no length is provided (`max_length=None`). This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided, * `'only_first'`: truncate to a max length specified in `max_length` or to the max acceptable input length for the model if no length is provided (`max_length=None`). This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided, * `'only_second'`: truncate to a max length specified in `max_length` or to the max acceptable input length for the model if no length is provided (`max_length=None`). This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided, * `False` or `'do_not_truncate'` (default): No truncation (i.e. can output batch with sequences length greater than the model max admissible input size) `max_length` (:obj:`Union[int, None]`, `optional`, defaults to :obj:`None`): Control the length for padding/truncation. Accepts the following values * `None` (default): This will use the predefined model max length if required by one of the truncation/padding parameters. If the model has no specific max input length (e.g. XLNet) truncation/padding to max length is deactivated. * `any integer value` (e.g. `42`): Use this specific maximum length value if required by one of the truncation/padding parameters. stride (:obj:`int`, `optional`, defaults to ``0``): If set to a number along with max_length, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflow ing sequences. The value of this argument defines the number of overlapping tokens. is_pretokenized (:obj:`bool`, defaults to :obj:`False`): Set to True to indicate the input is already tokenized pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability >= 7.5 (Volta). return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`): Can be set to 'tf', 'pt' or 'np' to return respectively TensorFlow :obj:`tf.constant`, PyTorch :obj:`torch.Tensor` or Numpy :oj: `np.ndarray` instead of a list of python integers. a return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are token type IDs? <../glossary.html#token-type-ids>`_ return_attention_mask (:obj:`bool`, `optional`, defaults to :obj:`none`): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. `What are attention masks? <../glossary.html#attention-mask>`__ return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to return overflowing token sequences (default False). return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to return special tokens mask information (default False). return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True to return (char_start, char_end) for each token (default False). If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on fast tokenizers inheriting from PreTrainedTokenizerFast. **kwargs: passed to the `self.tokenize()` method Return: A Dictionary of shape:: { input_ids: list[int], token_type_ids: list[int] if return_token_type_ids is True (default) attention_mask: list[int] if return_attention_mask is True (default) overflowing_tokens: list[int] if the tokenizer is a slow tokenize, else a List[List[int]] if a ``max_length`` is specified and ``return_overflowing_tokens=True`` special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True } With the fields: - ``input_ids``: list of token ids to be fed to a model - ``token_type_ids``: list of token type ids to be fed to a model - ``attention_mask``: list of indices specifying which tokens should be attended to by the model - ``overflowing_tokens``: list of overflowing tokens sequences if a max length is specified and ``return_overflowing_tokens=True``. - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added tokens and 1 specifying sequence tokens. csleZdZUdZiZeeefed<iZeeeeeffed<iZ eeeee ffed<iZ eee fed<ddgZ eeed<d Zeed <fd d Zee d ddZee d ddZee d ddZeje d ddZeje d ddZeddZeddZeed ddZeeddUeeee fe!eeee fe"ee"efee"efe!e e e!eee#fd!d"d#Z$dVe"e d$d%d&Z%dWd'd(Z&eee'dXeeeeeeefe!eeeeeeefe"ee"efee"efe!e e e"e!e e!eee#fe!e"e!e"e"e"e"e"e"e(d)d*d+Z)eee'dYeeee fe!eeee fe"ee"efee"efe!e e e"e!e e!eee#fe!e"e!e"e"e"e"e"e"e(d)d,d-Z*dde+j,e-j.dd ddddddddddfeeee fe!eeee fe"e+e-e!e e e"e!e e!eee#fe!e"e!e"e"e"e"e"e"e(d.d/d0Z/eee'dZeeeee0eeee1ee ee2fe"ee"efee"efe!e e e"e!e e!eee#fe!e"e!e"e"e"e"e"e"e(d1d2d3Z3de+j,e-j.dd ddddddddddfeeeee0eeee1ee ee2fe"e+e-e!e e e"e!e e!eee#fe!e"e!e"e"e"e"e"e"e(d4d5d6Z4d[ee(ee(eee feeee feeee ffee"efe!e e!e e!e"e!eee#fe"e(d7d8d9Z5d\ee!eee d:d;d<Z6d]ee!eed:d=d>Z7eee'd^ee e!ee e"ee"efee"efe!e e e!e e!eee#fe!e"e!e"e"e"e"e"e"e"e(d?d@dAZ8d_ee e!ee e eee-fe eee ee ee fdCdDdEZ9de+j,ddfeeee fe(fe!e e+e!e e!e"e:dFdGdHZ;eee eedIdJdKZe?eedRdSdTZ@ZAS)bPreTrainedTokenizerBasez Base class for slow and fast tokenizers. Handle shared (mostly boiler plate) methods for slow and fast tokenizers. vocab_files_namespretrained_vocab_files_mappretrained_init_configurationmax_model_input_sizestoken_type_idsattention_maskmodel_input_namesright padding_sidec s~d|_||_|d|dd}|dk r,|nt|_|d|j|_|jdks\td|j|d|j|_tj f|dS)Nrmodel_max_lengthmax_lenr)rleftzKPadding side should be selected between 'right' and 'left', current value: r) init_inputs init_kwargspopVERY_LARGE_INTEGERrrrrrErF)rKrr)rLrr rFs z PreTrainedTokenizerBase.__init__)rOcCs|jS)zq Kept here for backward compatibility. Now renamed to `model_max_length` to avoid ambiguity. )r)rKrrr rszPreTrainedTokenizerBase.max_lencCs|j|jddS)NF)pair)rnum_special_tokens_to_add)rKrrr max_len_single_sentencesz/PreTrainedTokenizerBase.max_len_single_sentencecCs|j|jddS)NT)r)rr)rKrrr max_len_sentences_pair#sz.PreTrainedTokenizerBase.max_len_sentences_paircCs4||j|jddkr(|jr(tdntddS)zM For backward compatibility, allow to try to setup 'max_len_single_sentence' F)rzXSetting 'max_len_single_sentence' is now deprecated. This value is automatically set up.N)rrrrwarningr)rKrrrr r's cCs4||j|jddkr(|jr(tdntddS)zL For backward compatibility, allow to try to setup 'max_len_sentences_pair' T)rzWSetting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.N)rrrrrr)rKrrrr r3s cOs |j||S)a Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer. Args: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``. - a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``. - (not applicable to all derived classes, deprecated) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``. cache_dir: (`optional`) string: Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the vocabulary files and override the cached versions if they exists. resume_download: (`optional`) boolean, default False: Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method. kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details. Examples:: # We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer # Download vocabulary from S3 and cache. tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') # Download vocabulary from S3 (user-uploaded) and cache. tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased') # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`) tokenizer = BertTokenizer.from_pretrained('./test/saved_model/') # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt') # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='') # You should be sure '' is in the vocabulary when doing that. # Otherwise use tokenizer.add_special_tokens({'unk_token': ''}) instead) assert tokenizer.unk_token == '' )_from_pretrained)rinputsrrrr from_pretrained?s5z'PreTrainedTokenizerBase.from_pretrainedc( Os |dd}|dd}|dd}|dd}|dd}t|j} i} i} || krx"|jD]\} } | || | <qfW|jr||jkr|j|} nt d |d | |t j |st|rt|jd krtd |jtd |jt|jd } || | <nttttd }xr|j|D]`\} }t j |rt j ||}t j |st d |d}nt||dd}|| | <q8WyLi}xB| D]6\} }|dkrd|| <nt||||||d|| <qWWnNtk r6|| kr d}n d |d | |t|j}t|YnXtdd|Drttd |d | |t|jxL| D]@\} }||| krt d |nt d ||| q~W|dd}|dk rt |dd}t!"|}WdQRX|dd}|s|}n| }|#|||jkrj|j|}|dk rjt$|t%t&frjt'|(dt%d||d<|dd}x(|D]\}}||kr|||<qWy|||}Wnt)k rt)d YnX||_*||_+|d!d}|dk rLt |dd}t!"|}WdQRXx8|D],\}} t$| t,r:t-f| } t.||| qW|j/}!|dk rt |dd}"t!"|"}#WdQRXtt0|#d"d#d$}$xV|$D]N\}%}&|&t|kst1d%|%d&t|d'|&d(|j2|%t3|%|!kd)qW|4}'|'rtd*|S)+N cache_dirforce_downloadFresume_downloadproxieslocal_files_onlyzModel name '{}' not found in model shortcut name list ({}). Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.z, rzCalling {}.from_pretrained() with the path to a single file or url is not supported.Use a model identifier or the path to a directory instead.zPCalling {}.from_pretrained() with the path to a single file or url is deprecatedr)added_tokens_filespecial_tokens_map_filetokenizer_config_fileZfull_tokenizer_filez&Didn't find file {}. We won't load it.)filenameuse_cdn)rrrrrz;Couldn't reach server at '{}' to download vocabulary files.zModel name '{}' was not found in tokenizers model name list ({}). We assumed '{}' was a path or url to a directory containing vocabulary files named {}, but couldn't find such vocabulary files at this path or url.css|]}|dkVqdS)Nr)rxfull_file_namerrr rsz;PreTrainedTokenizerBase._from_pretrained..zModel name '{}' was not found in tokenizers model name list ({}). We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files named {} but couldn't find such vocabulary files at this path or url.zloading file {}z loading file {} from cache at {}rzutf-8)rBrrrgꌠ9Y>)FrzoUnable to load vocabulary from file. Please check that the provided vocabulary is accessible and not corrupted.rcSs|dS)Nrr)xrrr $sz:PreTrainedTokenizerBase._from_pretrained..)ruzNon-consecutive added token 'z' found. Should have index z but has index z in saved vocabulary.)rzuSpecial tokens have been added in the vocabulary, make sure the associated word emebedding are fine-tuned or trained.)5rrrrrrYrcopyrrrqjoinospathisfilerlenrrrrADDED_TOKENS_FILESPECIAL_TOKENS_MAP_FILETOKENIZER_CONFIG_FILEFULL_TOKENIZER_FILEisdirexistsrrEnvironmentErrorrXropenjsonloadupdaterGr=floatmingetOSErrorrrdictr rrsortedrrr|r)(rpretrained_model_name_or_pathrrrrrrrZ s3_models vocab_filesZinit_configurationfile_idZmap_listZadditional_files_names file_namerZresolved_vocab_files file_pathmsgrZtokenizer_config_handlerZsaved_init_inputsrrZ args_name tokenizerrZspecial_tokens_map_handlerrurrZadded_tokens_handleadded_tok_encoderZadded_tok_encoder_sortedtokenindexrrrr rvs                           z(PreTrainedTokenizerBase._from_pretrainedc Cstj|r td|dStj|ddtj|t}tj|t }tj|t }t |j }t|jdkrt |j|d<x|jD]}||dqWt|ddd }|tj|d d WdQRXt|ddd X}i}x8|jD]*\} } t| tr| || <q| || <qW|tj|d d WdQRX|} | r|t|ddd }tj| d d } || WdQRX||} | ||fS) ac Save the tokenizer vocabulary files together with: - added tokens, - special-tokens-to-class-attributes-mapping, - tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert). Warning: This won't save modifications you may have applied to the tokenizer after the instantiation (e.g. modifying tokenizer.do_lower_case after creation). This method make sure the full tokenizer can then be re-loaded using the :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method. z4Provided path ({}) should be a directory, not a fileNT)exist_okrrwzutf-8)rBF) ensure_ascii)rrrrrrqmakedirsrrrrrdeepcopyrrrrrrrwriterdumpsrrYrGr rUget_added_vocabsave_vocabulary)rKsave_directoryrrrZtokenizer_configrf write_dictrurZ added_vocabout_strrrrr save_pretrained6s8     z'PreTrainedTokenizerBase.save_pretrainedzC **kwargs: passed to the `self.tokenize()` method. NTFr)text text_pairrpadding truncationr/stridereturn_tensorsc Ks*|j|f|||||||d| } | dS)az Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``. Args: text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method) text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method) )rrrrr/rr input_ids) encode_plus) rKrrrrrr/rrrencoded_inputsrrr encodegs!zPreTrainedTokenizerBase.encode)rrOcCstdS)N)NotImplementedError)rKrrrr rsz1PreTrainedTokenizerBase.num_special_tokens_to_addc Ks|dd}|dd}|dk rB|dkrB|dkrB|r>tdd}|dkrv|rv|r^tdt|dkrntj} qtj} n2|dk r|d krtj} qt |tst|} ntj } |dkr|dkr|rtd tt |} n2|dk r|d krt j } nt |t st |} nt j } |dkrz| tjkrD|jtkr>|r6td tj } n|j}| t j krz|jtkrt|rltd t j } n|j}| tj kr|jr|jd krtd| t j kr| tj kr|dk r|dk r||d krtd|d|d| | ||fS)z Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy and pad_to_max_length) and behaviors. truncation_strategyr(Zpad_to_max_lengthFNavTruncation was not explicitely activated but `max_length` is provided a specific value, please use `truncation=True` to explicitely truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.r'aThe `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).TaThe `truncation_strategy` argument is deprecated and will be removed in a future version, use `truncation=True` to truncate examples to a max length. You can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input size of the model (e.g. 512 for Bert). If you have pairs of inputs, you can give a specific truncation strategy selected among `truncation='only_first'` (will only truncate the first sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).zAsking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no padding.zAsking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.rzAsking to pad but the tokenizer does not have a padding token. Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`.zATruncation and padding are both activated but truncation length (z+) is not a multiple of pad_to_multiple_of (z).)rrrwarningswarnDeprecationWarningr-r0r1rGr2r&r+r,r LARGE_INTEGERrrr) rKrrr/pad_to_multiple_ofrrZold_truncation_strategyZold_pad_to_max_lengthpadding_strategyr!rrr "_get_padding_truncation_strategiessv                  z:PreTrainedTokenizerBase._get_padding_truncation_strategies)rrrrrr/ris_pretokenizedr&rreturn_token_type_idsreturn_attention_maskreturn_overflowing_tokensreturn_special_tokens_maskreturn_offsets_mapping return_lengthrrOcKst|tsnt|ttfrft|dksnt|dtsnt|dttfrft|ddksnt|ddtsntd|dkst|tst|ttfrt|dkst|dtst|dttfrt|ddkst|ddtstdt| rt|ttfp(|o(t|ttfo(|o(t|dttf}|r|dk rJtt||n|}|jf|||||||| | | | | ||||d|S|j f||||||||| | | | | ||||d|SdS)a/ Returns a dictionary containing the encoded sequence or sequence pair and additional information: the mask for sequence classification and the overflowing elements if a ``max_length`` is specified. Args: text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]``): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pre-tokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_pretokenized=True` (to lift the ambiguity with a batch of sequences) text_pair (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]``): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pre-tokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_pretokenized=True` (to lift the ambiguity with a batch of sequences) rztext input must of type `str` (single example), `List[str]` (batch or single pretokenized example) or `List[List[str]]` (batch of pretokenized examples).Nztext_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) or `List[List[str]]` (batch of pretokenized examples).)batch_text_or_text_pairsrrrr/rr)r&rr*r+r,r-r.r/r)rrrrrr/rr)r&rr*r+r,r-r.r/r) rGrrrrrr|zipbatch_encode_plusr)rKrrrrrr/rr)r&rr*r+r,r-r.r/rrZ is_batchedr0rrr __call__sp'  6  $0z PreTrainedTokenizerBase.__call__cKsX|jf|||| |d|\}}}}|jf||||||||| | | | | ||||d|S)a Returns a dictionary containing the encoded sequence or sequence pair and additional information: the mask for sequence classification and the overflowing elements if a ``max_length`` is specified. Args: text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the later only for not-fast tokenizers)): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method) text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method) )rrr/r&r)rrrr'r!r/rr)r&rr*r+r,r-r.r/r)r( _encode_plus)rKrrrrrr/rr)r&rr*r+r,r-r.r/rrr'r!rrr rs4&z#PreTrainedTokenizerBase.encode_plus)rrrr'r!r/rr)r&rr*r+r,r-r.r/rrOcKstdS)N)r )rKrrrr'r!r/rr)r&rr*r+r,r-r.r/rrrrr r4sz$PreTrainedTokenizerBase._encode_plus)r0rrrr/rr)r&rr*r+r,r-r.r/rrOcKsV|jf|||||d|\}}}}|jf||||||||| | | | | |||d|S)a[ Returns a dictionary containing the encoded sequence or sequence pair and additional information: the mask for sequence classification and the overflowing elements if a ``max_length`` is specified. Args: batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`, :obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also: :obj:`List[List[int]]`, :obj:`List[Tuple[List[int], List[int]]]`): Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in encode_plus) )rrr/r&r)r0rr'r!r/rr)r&rr*r+r,r-r.r/r)r(_batch_encode_plus)rKr0rrrr/rr)r&rr*r+r,r-r.r/rrr'r!rrr r2s2+z)PreTrainedTokenizerBase.batch_encode_plus)r0rr'r!r/rr)r&rr*r+r,r-r.r/rrOcKstdS)N)r )rKr0rr'r!r/rr)r&rr*r+r,r-r.r/rrrrr r5+sz*PreTrainedTokenizerBase._batch_encode_plus)rrr/r&r+rrrOcstttfr:tdttfr:fdddDdksJtddsb|r^gd<S|j|||d\}} }} drtddttfs|j||||dt|d St dt fd d  Dstd |t j krtd d dD}t j}i} xxtD]ltfdd D} |j| ||||d} x6| D]*\} }| | krrg| | <| | |qXWqWt| |d S)ap Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch. Padding side (left/right) padding token ids are defined at the tokenizer level (with ``self.padding_side``, ``self.pad_token_id`` and ``self.pad_token_type_id``) Args: encoded_inputs: Dictionary of tokenized inputs (`Dict[str, List[int]]`) or batch of tokenized inputs. Batch of tokenized inputs can be given as dicts of lists or lists of dicts, both work so you can use ``tokenizer.pad()`` during pre-processing as well as in a PyTorch Dataloader collate function. (`Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`). padding: Boolean or specific strategy to use for padding. Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - 'longest' (or `True`) Pad to the longest sequence in the batch - 'max_length': Pad to the max length (default) - 'do_not_pad' (or `False`): Do not pad max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability >= 7.5 (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`): Can be set to 'tf', 'pt' or 'np' to return respectively TensorFlow :obj:`tf.constant`, PyTorch :obj:`torch.Tensor` or Numpy :oj: `np.ndarray` instead of a list of python integers. verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): Set to ``False`` to avoid printing infos and warnings. rcs i|]fddDqS)csg|] }|qSrr)rxexample)rurr rusz:PreTrainedTokenizerBase.pad...r)rx)r)rur r{usz/PreTrainedTokenizerBase.pad..rzYou should supply an encoding or a list of encodings to this method. An encoding is the output of one the encoding methods of the tokenizer, i.e. __call__/encode_plus/batch_encode_plus. r)rr/r)r/r'r&r+)rCc3s|]}t|kVqdS)N)r)rxrz) batch_sizerr rsz.PreTrainedTokenizerBase.pad..zMSome items in the output dictionnary have a different batch size than others.css|]}t|VqdS)N)r)rxrrrr rsc3s|]\}}||fVqdS)Nr)rxryrz)irr rs)rGrrrr@rrr(_padrrrXr-r0maxr1rangerYappend)rKrrr/r&r+rrr'r batch_outputsroutputsrurr)r7rr8r padHsN,      zPreTrainedTokenizerBase.pad) token_ids_0 token_ids_1rOcCs2|dkrt|dgSdgt|dgt|S)Nrr)r)rKr@rArrr $create_token_type_ids_from_sequencessz {}). Running this sequence through the model will result in indexing errors)r/rr&r+length)rCrD)r"r# FutureWarningr(r|rrrr&r,truncate_sequencesrCrBget_special_tokens_maskrrrrqr-r2r?rr@)rKrDrErrrr/rr&rr*r+r,r-r.r/rrDrr'r!rZlen_idsZ len_pair_idsr total_lenrGsequencerr=rrr prepare_for_modelsx!     &$ z)PreTrainedTokenizerBase.prepare_for_modelr')rDrErFr!rrOc Cs|dkr||gfSt|ts$t|}g}|tjkrxt|D]}|dksXt|t|kr|sptt||d}nd}||| d|dd}q<|stt||d}nd}||| d|dd}qdg|dgt|d|d<d|kr`|jg||d|d<d|krdg||d|d<|jg||d|d<ntd t |jn|rdgt|d|d<|S) a Pad encoded inputs (on left/right and up to predefined legnth or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability >= 7.5 (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) NrrrrrrrHrzInvalid padding strategy:) rr-r0rr2rrrrr)rKrr/r'r&r+Zneeds_to_be_padded differencerrr r9s<           zPreTrainedTokenizerBase._pad) sequencesrOc sfdd|DS)Ncsg|]}j|fqSr)decode)rxseq)rrKrr rsz8PreTrainedTokenizerBase.batch_decode..r)rKrSrr)rrKr batch_decodesz$PreTrainedTokenizerBase.batch_decode) token_idsskip_special_tokensclean_up_tokenization_spacesrOcCstdS)aH Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``. Args: token_ids: list of tokenized input ids. Can be obtained using the `encode` or `encode_plus` methods. skip_special_tokens: if set to True, will replace special tokens. clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces. N)r )rKrWrXrYrrr rTs zPreTrainedTokenizerBase.decode)r@rAalready_has_special_tokensrOcs0|r |dkstd|jfdd|D}|S)a Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. Args: token_ids_0: list of ids (must not contain special tokens) token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids for sequence pairs already_has_special_tokens: (default False) Set to True if the token list is already formated with special tokens for the model Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. NaYou cannot use ``already_has_special_tokens=False`` with this tokenizer. Please use a slow (full python) tokenizer to activate this argument.Or set `return_special_token_mask=True` when calling the encoding method to get the special tokens mask in any tokenizer. csg|]}|krdndqS)rrr)rxr)rrr rszCPreTrainedTokenizerBase.get_special_tokens_mask..)rr)rKr@rArZrHr)rr rLs z/PreTrainedTokenizerBase.get_special_tokens_mask) out_stringrOcCsX|ddddddddd d d d d ddddddd}|S)zx Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms. z ..z ??z !!z ,,z ' 'z n'tzn'tz 'mz'mz 'sz'sz 'vez'vez 'rez're)replace)r[rrr clean_up_tokenizations z-PreTrainedTokenizerBase.clean_up_tokenization)NTFFNrN)F)FFNNT)NTFFNrFNNNNFFFFT)NTFFNrFNNNNFFFFT)TFFNrFNNNNFFFFT)TNNNNT)N)N)NTFFNrNNNNFFFFTF)Nrr'r)FT)NF)Brr"r#r$rrrr>rrrrr=rrrrFr}rrrrr%rrr rrENCODE_KWARGS_DOCSTRINGr TextInputPreTokenizedInput EncodedInputrr|r3rrr('ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRINGr@r3rr-r2r&r,r4 TextInputPairPreTokenizedInputPairEncodedInputPairr2r5r?rBrCrOrKrr9rVrTrL staticmethodrbr~rr)rLr rsH     7 A1X  t|eh2Z v7h bU \k $L*?  r)@r$rrloggingrr" collectionsrenumrtypingrrrrrr r r numpyr6Z tokenizersr r rH file_utilsrrrrrrr tensorflowr5rm getLoggerrrr=rr%rrdrerfrhrirjrrrrrr&r-r3r:r?r@rrcrgrrrrr s\  (  $      7+