# --- distributed Ray retriever for RAG (examples/research_projects/rag) ---
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    """Thin wrapper around RagRetriever, meant to live inside a Ray actor."""

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """RagRetriever that delegates retrieval calls to a pool of Ray actor workers."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index
                    )
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")

        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(
                random_worker.retrieve.remote(question_hidden_states, n_docs)
            )
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(
            retriever_name_or_path, indexed_dataset, **kwargs
        )

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
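
# Usage sketch (illustrative, not from the original file): the worker class is turned
# into Ray actors and handed to `from_pretrained` as `actor_handles`. The checkpoint
# name and the worker count below are assumptions for the example.
#
#     import ray
#
#     ray.init()
#     RemoteRetriever = ray.remote(RayRetriever)
#     workers = [RemoteRetriever.remote() for _ in range(2)]
#     retriever = RagRayDistributedRetriever.from_pretrained(
#         "facebook/rag-sequence-nq", actor_handles=workers
#     )
#     retriever.init_retrieval()  # initializes the index on every worker
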
\"nll\"\t\t\t\t\t\t\t, lowerCAmelCase__ = 1\t\t\t\t\t\t\t, lowerCAmelCase__ = None\t\t\t\t\t\t\t, lowerCAmelCase__ = \"mean\"\t\t\t\t\t\t\t, lowerCAmelCase__ = 0\t\t\t\t\t\t\t, lowerCAmelCase__ = 0\t\t\t\t\t\t\t, lowerCAmelCase__ = 0\t\t\t\t\t\t\t, lowerCAmelCase__ = 0\t\t\t\t\t\t\t, lowerCAmelCase__ = None\t\t\t\t\t\t\t, lowerCAmelCase__ = None\t\t\t\t\t\t\t, lowerCAmelCase__ = 6_4\t\t\t\t\t\t\t, lowerCAmelCase__ = 3_2\t\t\t\t\t\t\t, lowerCAmelCase__ = 3_2\t\t\t\t\t\t\t, lowerCAmelCase__ = 2\t\t\t\t\t\t\t, lowerCAmelCase__ = 2\t\t\t\t\t\t\t, lowerCAmelCase__ = 2\t\t\t\t\t\t\t, lowerCAmelCase__ = 2\t\t\t\t\t\t\t, lowerCAmelCase__ = True\t\t\t\t\t\t\t, lowerCAmelCase__ = \"gelu\"\t\t\t\t\t\t\t, lowerCAmelCase__ = 0.05\t\t\t\t\t\t\t, lowerCAmelCase__ = 0.1\t\t\t\t\t\t\t, lowerCAmelCase__ = 0.1\t\t\t\t\t\t\t, lowerCAmelCase__ = 0.1\t\t\t\t\t\t\t, lowerCAmelCase__ = 0.1\t\t\t\t\t\t\t, lowerCAmelCase__ = 1_0_0\t\t\t\t\t\t\t, lowerCAmelCase__ = 0.02\t\t\t\t\t\t\t, lowerCAmelCase__=True\t\t\t\t\t\t\t, lowerCAmelCase__ = \"prob\"\t\t\t\t\t\t\t, lowerCAmelCase__ = 5\t\t\t\t\t\t\t, lowerCAmelCase__ = True\t\t\t\t\t\t\t, **lowerCAmelCase__\t\t\t\t\t\t\t, ):\r\t\t\t\t# time series specific configuration\r\t\t\t\t__SCREAMING_SNAKE_CASE = prediction_length\r\t\t\t\t__SCREAMING_SNAKE_CASE = context_length or prediction_length\r\t\t\t\t__SCREAMING_SNAKE_CASE = distribution_output\r\t\t\t\t__SCREAMING_SNAKE_CASE = loss\r\t\t\t\t__SCREAMING_SNAKE_CASE = input_size\r\t\t\t\t__SCREAMING_SNAKE_CASE = num_time_features\r\t\t\t\t__SCREAMING_SNAKE_CASE = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]\r\t\t\t\t__SCREAMING_SNAKE_CASE = scaling\r\t\t\t\t__SCREAMING_SNAKE_CASE = num_dynamic_real_features\r\t\t\t\t__SCREAMING_SNAKE_CASE = num_static_real_features\r\t\t\t\t__SCREAMING_SNAKE_CASE = num_static_categorical_features\r\r\t\t\t\t# set cardinality\r\t\t\t\tif cardinality and num_static_categorical_features > 0:\r\t\t\t\t\t\tif len(lowerCAmelCase__) != num_static_categorical_features:\r\t\t\t\t\t\t\t\traise ValueError(\r\t\t\t\t\t\t\t\t \"\"\"The cardinality should be a list of the same length as `num_static_categorical_features`\"\"\")\r\t\t\t\t\t\t__SCREAMING_SNAKE_CASE = cardinality\r\t\t\t\telse:\r\t\t\t\t\t\t__SCREAMING_SNAKE_CASE = [0]\r\r\t\t\t\t# set embedding_dimension\r\t\t\t\tif embedding_dimension and num_static_categorical_features > 0:\r\t\t\t\t\t\tif len(lowerCAmelCase__) != num_static_categorical_features:\r\t\t\t\t\t\t\t\traise ValueError(\r\t\t\t\t\t\t\t\t \"\"\"The embedding dimension should be a list of the same length as `num_static_categorical_features`\"\"\")\r\t\t\t\t\t\t__SCREAMING_SNAKE_CASE = embedding_dimension\r\t\t\t\telse:\r\t\t\t\t\t\t__SCREAMING_SNAKE_CASE = [min(5_0\t\t\t\t\t\t\t, (cat + 1) // 2) for cat in self.cardinality]\r\r\t\t\t\t__SCREAMING_SNAKE_CASE = num_parallel_samples\r\r\t\t\t\t# Transformer architecture configuration\r\t\t\t\t__SCREAMING_SNAKE_CASE = input_size * len(self.lags_sequence) + self._number_of_features\r\t\t\t\t__SCREAMING_SNAKE_CASE = d_model\r\t\t\t\t__SCREAMING_SNAKE_CASE = encoder_attention_heads\r\t\t\t\t__SCREAMING_SNAKE_CASE = decoder_attention_heads\r\t\t\t\t__SCREAMING_SNAKE_CASE = encoder_ffn_dim\r\t\t\t\t__SCREAMING_SNAKE_CASE = decoder_ffn_dim\r\t\t\t\t__SCREAMING_SNAKE_CASE = encoder_layers\r\t\t\t\t__SCREAMING_SNAKE_CASE = decoder_layers\r\r\t\t\t\t__SCREAMING_SNAKE_CASE = dropout\r\t\t\t\t__SCREAMING_SNAKE_CASE = attention_dropout\r\t\t\t\t__SCREAMING_SNAKE_CASE = activation_dropout\r\t\t\t\t__SCREAMING_SNAKE_CASE 
= encoder_layerdrop\r\t\t\t\t__SCREAMING_SNAKE_CASE = decoder_layerdrop\r\r\t\t\t\t__SCREAMING_SNAKE_CASE = activation_function\r\t\t\t\t__SCREAMING_SNAKE_CASE = init_std\r\r\t\t\t\t__SCREAMING_SNAKE_CASE = use_cache\r\r\t\t\t\t# Informer\r\t\t\t\t__SCREAMING_SNAKE_CASE = attention_type\r\t\t\t\t__SCREAMING_SNAKE_CASE = sampling_factor\r\t\t\t\t__SCREAMING_SNAKE_CASE = distil\r\r\t\t\t\tsuper().__init__(is_encoder_decoder=lowerCAmelCase__\t\t\t\t\t\t\t, **lowerCAmelCase__)\r\r\r\r\r\t\t@property\r\t\tdef \tsnake_case_ ( self):\r\t\t\t\treturn (\r\t\t\t\t sum(self.embedding_dimension)\r\t\t\t\t + self.num_dynamic_real_features\r\t\t\t\t + self.num_time_features\r\t\t\t\t + self.num_static_real_features\r\t\t\t\t + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features\r\t\t\t\t)\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":100,"string":"100"},"style_context":{"kind":"string","value":"\r\n\r\n\r\nimport argparse\r\nimport json\r\nimport os\r\nimport time\r\nimport zipfile\r\n\r\nfrom get_ci_error_statistics import download_artifact, get_artifacts_links\r\n\r\nfrom transformers import logging\r\n\r\n\r\n__lowerCAmelCase\t\t =\t\t\t\t\t\t\tlogging.get_logger(__name__)\r\n\r\n\r\n\r\n\r\ndef snake_case_ ( snake_case\t\t\t,\t\t\t\t\t\t\tsnake_case ) ->\t\t\t\tList[str]:\r\n lowercase__: List[str] \t\t\t= set()\r\n lowercase__: List[Any] \t\t\t= []\r\n\r\n def parse_line(snake_case ):\r\n for line in fp:\r\n if isinstance(snake_case\t\t\t,\t\t\t\t\t\t\tsnake_case ):\r\n lowercase__: Optional[Any] \t\t\t= line.decode('UTF-8' )\r\n if \"warnings summary (final)\" in line:\r\n continue\r\n # This means we are outside the body of a warning\r\n elif not line.startswith(' ' ):\r\n # process a single warning and move it to `selected_warnings`.\r\n if len(snake_case ) > 0:\r\n lowercase__: List[str] \t\t\t= '\\n'.join(snake_case )\r\n # Only keep the warnings specified in `targets`\r\n if any(f': {x}: ' in warning for x in targets ):\r\n selected_warnings.add(snake_case )\r\n buffer.clear()\r\n continue\r\n else:\r\n lowercase__: Union[str, Any] \t\t\t= line.strip()\r\n buffer.append(snake_case )\r\n\r\n if from_gh:\r\n for filename in os.listdir(snake_case ):\r\n lowercase__: Dict \t\t\t= os.path.join(snake_case\t\t\t,\t\t\t\t\t\t\tsnake_case )\r\n if not os.path.isdir(snake_case ):\r\n # read the file\r\n if filename != \"warnings.txt\":\r\n continue\r\n with open(snake_case ) as fp:\r\n parse_line(snake_case )\r\n else:\r\n try:\r\n with zipfile.ZipFile(snake_case ) as z:\r\n for filename in z.namelist():\r\n if not os.path.isdir(snake_case ):\r\n # read the file\r\n if filename != \"warnings.txt\":\r\n continue\r\n with z.open(snake_case ) as fp:\r\n parse_line(snake_case )\r\n except Exception:\r\n logger.warning(\r\n f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' 
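
# Quick sanity check (run with an installed `transformers`, since the relative imports
# above only resolve inside the library package):
#
#     from transformers import InformerConfig
#
#     cfg = InformerConfig(prediction_length=24)
#     # With the defaults (lags [1..7], input_size=1, no static/dynamic/time features):
#     #   feature_size = 1 * 7 + (0 + 0 + 0 + 0 + 1 * 2) = 9
#     assert cfg.feature_size == 9
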
# --- utils/extract_warnings.py: collect warnings from CI workflow artifacts ---
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # `from_gh` is a module-level flag set in the `__main__` block below.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
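
# Illustration of the filter in `parse_line` (the warning line below is made up):
# a warning is kept iff the substring ": <Target>: " occurs in its joined text,
# which matches pytest's "path.py:<line>: <WarningClass>: <message>" format.
_example_warning = "src/example.py:42: FutureWarning: `foo` is deprecated"
_example_targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
assert any(f": {x}: " in _example_warning for x in _example_targets)
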
# --- Swin2SRImageProcessor (transformers/models/swin2sr/image_processing_swin2sr.py) ---
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        # Pad height and width up to the next multiple of `size` (bottom/right only).
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
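
# Padding arithmetic illustration (example values chosen here): `pad` grows height and
# width to the next multiple of `size`, and always adds at least one row/column — for
# an already-divisible extent it still pads a full extra `size` pixels, because
# (h // size + 1) * size - h == size when h % size == 0.
#
#     h, w, size = 250, 249, 8
#     pad_height = (h // size + 1) * size - h   # (31 + 1) * 8 - 250 = 6  -> 256
#     pad_width  = (w // size + 1) * size - w   # (31 + 1) * 8 - 249 = 7  -> 256
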
# --- docs/source/it/_config.py: first-cell install snippet for the Italian notebooks ---
# (English gloss of the Italian comments: "Installation of Transformers"; "To install
# from source instead of the latest released version, comment out the command above
# and uncomment the following command.")
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
# --- diffusers scripts/convert_dance_diffusion_to_diffusers.py ---
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean signal and the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"


DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    main(args)
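
# Two small traces of `rename` (the second key is hypothetical but format-typical):
#
#     rename("timestep_embed.weight")
#     -> "time_proj.weight"
#
#     rename("net.3.main.7.main.2.qkv_proj.weight")
#     -> depth ends up 2, layer "2" maps to DOWN_NUM_TO_LAYER["2"] == "attentions.0",
#        and the fused qkv Conv weight is split into three Linear keys:
#        ["down_blocks.2.attentions.0.query.weight",
#         "down_blocks.2.attentions.0.key.weight",
#         "down_blocks.2.attentions.0.value.weight"]
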
# --- 2x2 linear system solver via Cramer's rule (equations given as [a, b, c] for ax + by = c) ---
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
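
# Worked example: x + 2y = 3 and 2x + y = 3 intersect at (1, 1):
#     determinant   = 1*1 - 2*2 = -3
#     determinant_x = 3*1 - 3*2 = -3  -> x = -3 / -3 = 1.0
#     determinant_y = 1*3 - 2*3 = -3  -> y = -3 / -3 = 1.0
assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)
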
# --- CLAP processor tests (tests/models/clap/test_processor_clap.py) ---
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
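
# What the processor under test enables end to end (sketch; requires downloading the
# "laion/clap-htsat-unfused" checkpoint, and `audio_array` is a placeholder for a 1-D
# waveform, so it is kept as a comment):
#
#     from transformers import ClapProcessor
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(text=["a dog barking"], audios=[audio_array], return_tensors="pt")
#     # `inputs` then carries both the tokenized text and the audio features.
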
# --- CodeGen tokenizer tests (tests/models/codegen/test_tokenization_codegen.py) ---
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Byte-level pretokenization is hard to mix with these tokenizers, so this
        # common test is intentionally skipped.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
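
# The @slow test above exercises CodeGen-specific decoding: `truncate_before_pattern`
# cuts the decoded string at the first regex match. Usage sketch (checkpoint download
# required, so kept as a comment):
#
#     tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tokenizer.encode("def f():\n    return 1\n\n\n\n# trailing comment")
#     tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"])
#     # -> everything from the first match onward is dropped
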
# ===== row 714 (label 0) | code (codestyle 40) =====
from __future__ import annotations

import random
import unittest

from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )


class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict


@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing .

        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.

        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
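# Illustrative sketch (added, not from the row): how Transformer-XL's `mems` cache is
# threaded between segments, which is what create_and_check_transfo_xl_model exercises.
# Hyperparameters mirror the tester above; assumes TensorFlow is installed.
import tensorflow as tf
from transformers import TransfoXLConfig, TFTransfoXLModel

config = TransfoXLConfig(vocab_size=99, d_model=32, d_embed=32, n_head=4, d_head=8,
                         d_inner=128, n_layer=2, mem_len=30, cutoffs=[10, 50, 80], div_val=2)
model = TFTransfoXLModel(config)
segment_1 = tf.random.uniform((1, 7), maxval=99, dtype=tf.int32)
segment_2 = tf.random.uniform((1, 7), maxval=99, dtype=tf.int32)
out_1 = model(segment_1)                   # first segment: fresh memory
out_2 = model(segment_2, mems=out_1.mems)  # second segment reuses the cached hidden states
print([m.shape for m in out_2.mems])       # one (mem_len, batch, d_model) tensor per layer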
# ----- row 714 | style_context (codestyle 76) -----
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
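# Illustrative sketch (added): what the two special-token helpers above produce for a
# sentence pair. Requires network access to fetch the YituTech checkpoint.
from transformers import ConvBertTokenizerFast

tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tok("first sentence", "second one")
# Layout is [CLS] A [SEP] B [SEP]; token_type_ids are 0 for segment A and 1 for segment B.
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])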
# ===== row 715 (label 0) | code (codestyle 369) =====
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California housing dataset and fit/score the regressor.
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
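# Illustrative sketch (added): RMSE is often reported alongside the MAE/MSE printed
# above; it can be derived from the same predictions without refitting the model.
import numpy as np
from sklearn.metrics import mean_squared_error


def rmse(y_true, y_pred) -> float:
    # Root mean squared error, in the same units as the target variable.
    return float(np.sqrt(mean_squared_error(y_true, y_pred)))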
# ----- row 715 | style_context (codestyle 338) -----
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |- |_ |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
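# Illustrative sketch (added): expected console shape for pretty_print(3) -- the upper
# triangle from floyd() mirrored by reverse_floyd() into a diamond (each star prints
# with a trailing space, omitted here for readability):
#
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *
pretty_print(3)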
# ===== row 716 (label 0) | code (codestyle 46) =====
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
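# Illustrative sketch (added): sanity-checking the extracted student checkpoint. The
# path is the script's default dump location; adjust it to your --dump_checkpoint value.
import torch

compressed = torch.load("serialization_dir/tf_roberta_048131723.pth", map_location="cpu")
print(len(compressed), "tensors kept")
# Copied blocks were re-indexed 0..5, so only student layer indices should appear:
print(sorted({k.split(".")[3] for k in compressed if ".layer." in k}))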
# ----- row 716 | style_context (codestyle 22) -----
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )

# ===== row 717 (label 1) | code (codestyle 202) =====
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order, with multiplicity."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
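# Worked example (added): 360 = 2^3 * 3^2 * 5, so the factor list comes back sorted
# with multiplicity; a prime simply returns itself.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]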
# ----- row 717 | style_context (codestyle 202) -----
def merge_sort(collection: list) -> list:
    """Sort by repeatedly pulling the current minimum to the front and the
    current maximum to the back (despite the name, this is a min-max sort,
    not a classic merge sort)."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
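# Worked example (added): each pass peels off the current minimum and maximum, so
# [5, 1, 4, 2, 3] builds start=[1, 2] and end=[4, 5], and the leftover middle
# element [3] lands between them.
assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]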
# ===== row 718 (label 1) | code (codestyle 101) =====
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers

from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)

# ----- row 718 | style_context (codestyle 101) -----
import os
import sys

SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)

# ===== row 719 (label 0) | code (codestyle 354) =====
def gnome_sort(lst: list) -> list:
    """Pure implementation of the gnome sort algorithm in Python."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
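# Worked example (added): gnome sort walks forward while adjacent items are ordered
# and steps back one position after each swap, giving O(n^2) worst case but O(n) on
# already-sorted input.
assert gnome_sort([3, 1, 2]) == [1, 2, 3]
assert gnome_sort([]) == []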
# ----- row 719 | style_context (codestyle 313) -----
import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)

        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
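# Illustrative sketch (added): the element-wise tolerance check used in the integration
# test above, isolated with made-up fp16-style values. TOLERANCE = 1e-4 as in the file.
import math

actual = [-0.60397, -0.25171, -0.10252]
expected = [-0.6040, -0.2517, -0.1025]
for a, b in zip(actual, expected):
    # Passes when |a - b| <= max(rel_tol * max(|a|, |b|), abs_tol)
    assert math.isclose(a, b, rel_tol=1e-4, abs_tol=1e-4)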
# --- row 720, "style_context": MT5 Flax integration test (transformers) ---

import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)


# --- row 721, "code": deprecation-warning tests for `datasets` metrics ---

import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
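# Hedged sketch of one concrete case the parametrized test above covers,
# assuming a `datasets` version that still ships the deprecated metric APIs:
import pytest
from datasets import list_metrics


def test_list_metrics_warns(mock_emitted_deprecation_warnings, mock_hfh):
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        list_metrics()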
# --- row 721, "style_context": multi-process logging utility (accelerate) ---

import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
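# Hedged usage sketch for get_logger above (run under `accelerate launch`;
# the Accelerator() call is what initializes the shared PartialState):
from accelerate import Accelerator

accelerator = Accelerator()
logger = get_logger(__name__)
logger.info("emitted once, on the main process only")
logger.info("emitted on every process", main_process_only=False)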
# --- row 722, "code": Tatoeba conversion tests (transformers) ---

import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
# --- row 722, "style_context": BARThez tokenizer tests (transformers) ---

import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
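# Hedged usage sketch for the tokenizer under test (downloads the
# moussaKam/mbarthez checkpoint, so it needs network access):
from transformers import BarthezTokenizerFast

tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
ids = tok("A long paragraph for summarization.").input_ids
# The expected ids in test_prepare_batch start with <s> (0) and end with </s> (2).
assert ids[0] == 0 and ids[-1] == 2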
# --- row 723, "code": transformers-cli entry point ---

from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
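# Hedged sketch of the contract main() relies on: each command class exposes a
# static register_subcommand(parser) that wires `func` to a factory returning
# an object with run(). The HelloCommand below is illustrative, not part of
# transformers.
from argparse import ArgumentParser


class HelloCommand:
    @staticmethod
    def register_subcommand(commands_parser):
        parser = commands_parser.add_parser("hello")
        parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from a custom subcommand")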
# --- row 723, "style_context": Stable Diffusion Attend-and-Excite pipeline tests (diffusers) ---

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
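# Hedged usage sketch mirroring the integration test above (needs a GPU and
# network access to the CompVis weights; `token_indices` picks which prompt
# tokens the attention maps are excited toward):
import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="a painting of an elephant with glasses",
    token_indices=[5, 7],
    guidance_scale=7.5,
    num_inference_steps=50,
    generator=torch.manual_seed(51),
).images[0]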
# --- row 724, "code": EfficientFormer checkpoint conversion (transformers) ---

import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image


def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to EfficientFormer pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file", default=None, type=str, required=True, help="The json file for EfficientFormer model config."
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
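# Hedged sanity check for rename_key above; the sample key and the stage
# count are illustrative. The stem convolution should land in the
# `efficientformer.` namespace:
assert rename_key("patch_embed.0.weight", 1) == "efficientformer.patch_embed.convolution1.weight"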
# --- row 724, "style_context": BERT PyTorch -> TensorFlow 1.x checkpoint conversion ---

import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument("--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()


# --- row 725, "code": fractional knapsack (greedy, via bisect on prefix weights) ---

from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    # Sort items by value/weight ratio, take whole items while they fit,
    # then a fraction of the first item that does not fit.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
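# Worked example for frac_knapsack above (values 60/100/120, weights
# 10/20/30, capacity 50): take the two best-ratio items whole, then 2/3 of
# the last one, for a total value of 240.
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0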
# --- row 725, "style_context": Quine-McCluskey boolean minimization ---

from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First pick essential prime implicants: columns covered by exactly one row.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Then greedily take the row covering the most remaining columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()


# --- row 726, "code": mean absolute deviation ---

def average_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
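# Worked example for average_absolute_deviation above: the mean of
# [0, 0, 10, 10] is 5 and every element deviates from it by exactly 5:
assert average_absolute_deviation([0, 0, 10, 10]) == 5.0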
# --- row 726, "style_context": ViT/DeiT timm -> transformers checkpoint conversion ---

import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image from the COCO val set
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
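# Hedged shape sketch of the q/k/v split performed in read_in_q_k_v above:
# timm stores one fused (3 * hidden, hidden) projection that is sliced into
# three (hidden, hidden) matrices. The toy hidden size below is illustrative.
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : 2 * hidden, :]
v = in_proj_weight[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)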
image_processor(images=prepare_img() , return_tensors='''pt'''\t)\r lowerCAmelCase :\t\tDict\t\t\t\t= encoding['''pixel_values''']\r lowerCAmelCase :\t\tList[Any]\t\t\t\t= model(_snake_case\t)\r\r if base_model:\r lowerCAmelCase :\t\tDict\t\t\t\t= timm_model.forward_features(_snake_case\t)\r assert timm_pooled_output.shape == outputs.pooler_output.shape\r assert torch.allclose(_snake_case , outputs.pooler_output , atol=1E-3\t)\r else:\r lowerCAmelCase :\t\tDict\t\t\t\t= timm_model(_snake_case\t)\r assert timm_logits.shape == outputs.logits.shape\r assert torch.allclose(_snake_case , outputs.logits , atol=1E-3\t)\r\r Path(_snake_case\t).mkdir(exist_ok=_snake_case\t)\r print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}'''\t)\r model.save_pretrained(_snake_case\t)\r print(f'''Saving image processor to {pytorch_dump_folder_path}'''\t)\r image_processor.save_pretrained(_snake_case\t)\r\r\rif __name__ == \"__main__\":\r snake_case__\t\t\t\t\t\t: Union[str, Any]\t\t\t = argparse.ArgumentParser()\r # Required parameters\r parser.add_argument(\r '''--vit_name''',\r default='''vit_base_patch16_224''',\r type=str,\r help='''Name of the ViT timm model you\\'d like to convert.''',\r )\r parser.add_argument(\r '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''\r )\r\r snake_case__\t\t\t\t\t\t: int\t\t\t = parser.parse_args()\r convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)\r\r\r"},"style_context_codestyle":{"kind":"number","value":60,"string":"60"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":727,"cells":{"code":{"kind":"string","value":"\n\n\nimport math\n\nimport tensorflow as tf\nfrom packaging import version\n\n\n\n\n\n\n\ndef lowerCamelCase__ ( _lowercase\t\t\t\t):\n\n '''simple docstring'''\n\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = tf.convert_to_tensor(_lowercase\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0\t\t\t\t)\t\t\t,\t\t\t\t\t\tx.dtype\t\t\t\t)\t\t\t\t))\n\n return x * cdf\n\n\n\n\n\n\n\ndef lowerCamelCase__ ( _lowercase\t\t\t\t):\n\n '''simple docstring'''\n\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = tf.convert_to_tensor(_lowercase\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = tf.cast(math.pi\t\t\t,\t\t\t\t\t\tx.dtype\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = tf.cast(0.04_4715\t\t\t,\t\t\t\t\t\tx.dtype\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi\t\t\t\t) * (x + coeff * tf.pow(_lowercase\t\t\t,\t\t\t\t\t\t3\t\t\t\t))\t\t\t\t))\n\n return x * cdf\n\n\n\n\n\n\n\ndef lowerCamelCase__ ( _lowercase\t\t\t\t):\n\n '''simple docstring'''\n\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = tf.convert_to_tensor(_lowercase\t\t\t\t)\n\n return x * tf.tanh(tf.math.softplus(_lowercase\t\t\t\t)\t\t\t\t)\n\n\n\n\n\n\n\ndef lowerCamelCase__ ( _lowercase\t\t\t\t):\n\n '''simple docstring'''\n\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = tf.convert_to_tensor(_lowercase\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = tf.cast(0.04_4715\t\t\t,\t\t\t\t\t\tx.dtype\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[int] = tf.cast(0.79_7884_5608\t\t\t,\t\t\t\t\t\tx.dtype\t\t\t\t)\n\n return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x)\t\t\t\t))\n\n\n\n\n\n\n\ndef lowerCamelCase__ ( _lowercase\t\t\t\t):\n\n '''simple docstring'''\n\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = tf.convert_to_tensor(_lowercase\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = 
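# Example invocation of the conversion script above. The script filename is
# hypothetical (whatever this file is saved as); the first run downloads the
# timm checkpoint and the ImageNet label file:
#
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224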
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """
    Gaussian Error Linear Unit. Original implementation of the gelu activation function in the Google BERT repo,
    based on the erf function.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """
    Smoother GELU variant using the tanh approximation.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    Clip the range of possible GELU outputs to [-10, 10]; useful for quantization.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit: split the input in two halves along `axis` and gate the first half with the sigmoid of the
    second.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
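# Illustrative only: a quick runtime comparison of the erf-based GELU and its
# tanh approximation defined above. Nothing is asserted against hard-coded
# constants; the printed max deviation is small (well under 1e-3 on this range).
def _demo_gelu_variants():
    x = tf.constant([-2.0, -1.0, 0.0, 1.0, 2.0])
    exact = _gelu(x)
    approx = _gelu_new(x)
    print(tf.reduce_max(tf.abs(exact - approx)).numpy())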
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
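# Illustrative only: constructing a config is pure Python and downloads
# nothing, so overriding hyper-parameters can be sanity-checked cheaply.
def _demo_layoutlmv3_config():
    config = LayoutLMv3Config(hidden_size=384, num_attention_heads=6)
    print(config.model_type, config.hidden_size, config.input_size)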
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """
    Try to find a path from the top-left to the bottom-right corner of `maze`
    (0 = free cell, 1 = blocked) and print the solution matrix if one exists.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """
    Depth-first backtracking step: mark (i, j) as visited, recurse in the four
    directions, and unmark on failure.
    """
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
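# Tiny worked example for the solver above (the top-level entry point's name
# was lost in the damaged source; `solve_maze` is the name chosen in the
# reconstruction). 1 = wall, 0 = free:
def _demo_solve_maze():
    maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solve_maze(maze)  # prints the 0/1 solution-path matrix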
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    # Placeholder object that raises a helpful error when the optional
    # `note_seq` backend is not installed.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])


# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()


def solution(length: int = 50) -> int:
    """
    Count the number of ways a row of `length` units can be filled, where a
    filled block must be at least 3 units long and blocks are separated by at
    least one empty unit (the all-empty row counts as one way).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[row_length - block_start - block_length - 1]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")


import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
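# Worked example for the postfix evaluator above:
# "5 6 9 * +" evaluates as 5 + (6 * 9) = 59, with the trace table printed
# along the way.
def _demo_postfix():
    assert solve("5 6 9 * +".split(" ")) == 59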
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
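# Usage sketch for the extractor above on a tiny HTML snippet (requires the
# `beautifulsoup4` package to be installed):
def _demo_markuplm_features():
    extractor = MarkupLMFeatureExtractor()
    encoding = extractor("<html><body><p>Hello world</p></body></html>")
    print(encoding["nodes"])   # [['Hello world']]
    print(encoding["xpaths"])  # [['/html/body/p']]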
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """
    Project Euler problem 71: find the numerator of the largest reduced proper
    fraction strictly left of numerator/denominator when listing fractions
    with denominators up to `limit` in ascending order.
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # Compare current_numerator/current_denominator against the best
        # fraction so far via cross-multiplication to avoid floating point.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
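# A cheap spot check of the search above with a tiny limit: among fractions
# below 3/7 with denominator at most 8, the largest is 2/5, so the returned
# numerator is 2.
def _demo_solution_71():
    assert solution(3, 7, 8) == 2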
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None


logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
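# Usage sketch for the tokenizer above (downloads the pretrained tokenizer
# files on first run, so it needs network access):
def _demo_mbart_tokenizer():
    tok = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # source sequences are suffixed with [eos, src_lang_code], per
    # set_src_lang_special_tokens above
    print(tok.convert_ids_to_tokens(batch["input_ids"][0].tolist()[-2:]))  # ['</s>', 'en_XX']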
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
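# Illustrative only: the attribute_map above aliases generic config names onto
# the decoder-specific fields, which can be verified without any downloads.
def _demo_speech2text2_config():
    config = Speech2Text2Config(d_model=128, decoder_layers=2)
    print(config.hidden_size)          # 128, via the "hidden_size" -> "d_model" alias
    print(config.num_attention_heads)  # 4, aliased to decoder_attention_heads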
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation of GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX, as
    the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
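# Small check of custom_unfold against torch.Tensor.unfold, which it is meant
# to replicate for ONNX export (requires torch):
def _demo_custom_unfold():
    import torch

    x = torch.arange(10).view(1, 10)
    # unfold dim 1 into windows of size 4 with step 2 -> shape (1, 4, 4)
    assert torch.equal(custom_unfold(x, 1, 4, 2), x.unfold(1, 4, 2))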
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs


from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils
import logging\r\nfrom .configuration_regnet import RegNetConfig\r\n\r\n\r\nUpperCamelCase__ \t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n# General docstring\r\nUpperCamelCase__ \t\t\t\t\t\t\t= 'RegNetConfig'\r\n\r\n# Base docstring\r\nUpperCamelCase__ \t\t\t\t\t\t\t= 'facebook/regnet-y-040'\r\nUpperCamelCase__ \t\t\t\t\t\t\t= [1, 1_0_8_8, 7, 7]\r\n\r\n# Image classification docstring\r\nUpperCamelCase__ \t\t\t\t\t\t\t= 'facebook/regnet-y-040'\r\nUpperCamelCase__ \t\t\t\t\t\t\t= 'tabby, tabby cat'\r\n\r\nUpperCamelCase__ \t\t\t\t\t\t\t= [\r\n 'facebook/regnet-y-040',\r\n # See all regnet models at https://huggingface.co/models?filter=regnet\r\n]\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( nn.Module\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tOptional[Any]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint = 3\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint = 1\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint = 1\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[str] = \"relu\"\t, )\t\t\t\t\t\t-> Any:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__()\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.Convad(\r\n __UpperCAmelCase\t, __UpperCAmelCase\t, kernel_size=__UpperCAmelCase\t, stride=__UpperCAmelCase\t, padding=kernel_size // 2\t, groups=__UpperCAmelCase\t, bias=__UpperCAmelCase\t, )\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.BatchNormad(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= ACTaFN[activation] if activation is not None else nn.Identity()\r\n\r\n\r\n\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tstr\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t\t)\t\t\t\t\t\t-> Tuple:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.convolution(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.normalization(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.activation(__UpperCAmelCase\t\t)\r\n return hidden_state\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( nn.Module\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tDict\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tRegNetConfig\t\t)\t\t\t\t\t\t-> Optional[int]:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__()\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= RegNetConvLayer(\r\n config.num_channels\t, config.embedding_size\t, kernel_size=3\t, stride=2\t, activation=config.hidden_act\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= config.num_channels\r\n\r\n\r\n\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tList[Any]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[Any]\t\t)\t\t\t\t\t\t-> str:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= pixel_values.shape[1]\r\n if num_channels != self.num_channels:\r\n raise ValueError(\r\n \"Make sure that the channel dimension of the pixel values match with the one set in the configuration.\"\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.embedder(__UpperCAmelCase\t\t)\r\n return hidden_state\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( nn.Module\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint = 2\t\t)\t\t\t\t\t\t-> List[str]:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__()\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= 
nn.Convad(__UpperCAmelCase\t, __UpperCAmelCase\t, kernel_size=1\t, stride=__UpperCAmelCase\t, bias=__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.BatchNormad(__UpperCAmelCase\t\t)\r\n\r\n\r\n\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tTensor\t\t)\t\t\t\t\t\t-> Tensor:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.convolution(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.normalization(__UpperCAmelCase\t\t)\r\n return hidden_state\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( nn.Module\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t\t)\t\t\t\t\t\t-> Tuple:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__()\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.AdaptiveAvgPoolad((1, 1)\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.Sequential(\r\n nn.Convad(__UpperCAmelCase\t, __UpperCAmelCase\t, kernel_size=1\t\t)\t, nn.ReLU()\t, nn.Convad(__UpperCAmelCase\t, __UpperCAmelCase\t, kernel_size=1\t\t)\t, nn.Sigmoid()\t, )\r\n\r\n\r\n\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t\t)\t\t\t\t\t\t-> str:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.pooler(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.attention(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= hidden_state * attention\r\n return hidden_state\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( nn.Module\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tstr\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tRegNetConfig\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint = 1\t\t)\t\t\t\t\t\t-> Optional[Any]:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__()\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= in_channels != out_channels or stride != 1\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= max(1\t, out_channels // config.groups_width\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= (\r\n RegNetShortCut(__UpperCAmelCase\t, __UpperCAmelCase\t, stride=__UpperCAmelCase\t\t) if should_apply_shortcut else nn.Identity()\r\n )\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.Sequential(\r\n RegNetConvLayer(__UpperCAmelCase\t, __UpperCAmelCase\t, kernel_size=1\t, activation=config.hidden_act\t\t)\t, RegNetConvLayer(__UpperCAmelCase\t, __UpperCAmelCase\t, stride=__UpperCAmelCase\t, groups=__UpperCAmelCase\t, activation=config.hidden_act\t\t)\t, RegNetConvLayer(__UpperCAmelCase\t, __UpperCAmelCase\t, kernel_size=1\t, activation=__UpperCAmelCase\t\t)\t, )\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= ACTaFN[config.hidden_act]\r\n\r\n\r\n\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t\t)\t\t\t\t\t\t-> int:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= hidden_state\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.layer(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.shortcut(__UpperCAmelCase\t\t)\r\n hidden_state += residual\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.activation(__UpperCAmelCase\t\t)\r\n return hidden_state\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( 
nn.Module\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tstr\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tRegNetConfig\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint = 1\t\t)\t\t\t\t\t\t-> Optional[int]:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__()\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= in_channels != out_channels or stride != 1\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= max(1\t, out_channels // config.groups_width\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= (\r\n RegNetShortCut(__UpperCAmelCase\t, __UpperCAmelCase\t, stride=__UpperCAmelCase\t\t) if should_apply_shortcut else nn.Identity()\r\n )\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.Sequential(\r\n RegNetConvLayer(__UpperCAmelCase\t, __UpperCAmelCase\t, kernel_size=1\t, activation=config.hidden_act\t\t)\t, RegNetConvLayer(__UpperCAmelCase\t, __UpperCAmelCase\t, stride=__UpperCAmelCase\t, groups=__UpperCAmelCase\t, activation=config.hidden_act\t\t)\t, RegNetSELayer(__UpperCAmelCase\t, reduced_channels=int(round(in_channels / 4\t\t)\t\t)\t\t)\t, RegNetConvLayer(__UpperCAmelCase\t, __UpperCAmelCase\t, kernel_size=1\t, activation=__UpperCAmelCase\t\t)\t, )\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= ACTaFN[config.hidden_act]\r\n\r\n\r\n\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tList[str]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t\t)\t\t\t\t\t\t-> str:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= hidden_state\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.layer(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.shortcut(__UpperCAmelCase\t\t)\r\n hidden_state += residual\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.activation(__UpperCAmelCase\t\t)\r\n return hidden_state\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( nn.Module\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tTuple\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tRegNetConfig\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint = 2\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint = 2\t, )\t\t\t\t\t\t-> Any:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__()\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= RegNetXLayer if config.layer_type == \"x\" else RegNetYLayer\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.Sequential(\r\n # downsampling is done in the first layer with stride of 2\r\n layer(\r\n __UpperCAmelCase\t, __UpperCAmelCase\t, __UpperCAmelCase\t, stride=__UpperCAmelCase\t, )\t, *[layer(__UpperCAmelCase\t, __UpperCAmelCase\t, __UpperCAmelCase\t\t) for _ in range(depth - 1\t\t)]\t, )\r\n\r\n\r\n\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tTuple\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t\t)\t\t\t\t\t\t-> Tuple:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.layers(__UpperCAmelCase\t\t)\r\n return hidden_state\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( nn.Module\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tstr\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tRegNetConfig\t\t)\t\t\t\t\t\t-> Tuple:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__()\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.ModuleList([]\t\t)\r\n # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input\r\n self.stages.append(\r\n RegNetStage(\r\n 
__UpperCAmelCase\t, config.embedding_size\t, config.hidden_sizes[0]\t, stride=2 if config.downsample_in_first_stage else 1\t, depth=config.depths[0]\t, )\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= zip(config.hidden_sizes\t, config.hidden_sizes[1:]\t\t)\r\n for (in_channels, out_channels), depth in zip(__UpperCAmelCase\t, config.depths[1:]\t\t):\r\n self.stages.append(RegNetStage(__UpperCAmelCase\t, __UpperCAmelCase\t, __UpperCAmelCase\t, depth=__UpperCAmelCase\t\t)\t\t)\r\n\r\n\r\n\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tList[Any]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tTensor\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tbool = False\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tbool = True\t\t)\t\t\t\t\t\t-> BaseModelOutputWithNoAttention:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= () if output_hidden_states else None\r\n\r\n for stage_module in self.stages:\r\n if output_hidden_states:\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= hidden_states + (hidden_state,)\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= stage_module(__UpperCAmelCase\t\t)\r\n\r\n if output_hidden_states:\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= hidden_states + (hidden_state,)\r\n\r\n if not return_dict:\r\n return tuple(v for v in [hidden_state, hidden_states] if v is not None\t\t)\r\n\r\n return BaseModelOutputWithNoAttention(last_hidden_state=__UpperCAmelCase\t, hidden_states=__UpperCAmelCase\t\t)\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( UpperCAmelCase_\t\t\t\t\t):\r\n __UpperCAmelCase\t\t\t:\t\t\tUnion[str, Any] = RegNetConfig\r\n __UpperCAmelCase\t\t\t:\t\t\tDict = 'regnet'\r\n __UpperCAmelCase\t\t\t:\t\t\tList[Any] = 'pixel_values'\r\n __UpperCAmelCase\t\t\t:\t\t\tTuple = True\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tDict\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t)\t\t\t\t\t\t-> Optional[Any]:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n if isinstance(__UpperCAmelCase\t, nn.Convad\t\t):\r\n nn.init.kaiming_normal_(module.weight\t, mode=\"fan_out\"\t, nonlinearity=\"relu\"\t\t)\r\n elif isinstance(__UpperCAmelCase\t, (nn.BatchNormad, nn.GroupNorm)\t\t):\r\n nn.init.constant_(module.weight\t, 1\t\t)\r\n nn.init.constant_(module.bias\t, 0\t\t)\r\n\r\n\r\n\r\n\r\n\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tDict\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tstr=False\t\t)\t\t\t\t\t\t-> int:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n if isinstance(__UpperCAmelCase\t, __UpperCAmelCase\t\t):\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= value\r\n\r\n\r\n\r\n\r\nUpperCamelCase__ \t\t\t\t\t\t\t= R'\\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\\n behavior.\\n\\n Parameters:\\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\\n Initializing with a config file does not load the weights associated with the model, only the\\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\\n'\r\n\r\nUpperCamelCase__ \t\t\t\t\t\t\t= R'\\n Args:\\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See\\n [`ConvNextImageProcessor.__call__`] for details.\\n\\n output_hidden_states (`bool`, *optional*):\\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\\n more detail.\\n return_dict (`bool`, *optional*):\\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\\n'\r\n\r\n\r\n\r\n\r\n\r\n@add_start_docstrings(\r\n 'The bare RegNet model outputting raw features without any specific head on top.'\t\t, UpperCAmelCase_\t\t, )\r\n# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( UpperCAmelCase_\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tUnion[str, Any]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t)\t\t\t\t\t\t-> List[str]:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= config\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= RegNetEmbeddings(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= RegNetEncoder(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.AdaptiveAvgPoolad((1, 1)\t\t)\r\n # Initialize weights and apply final processing\r\n self.post_init()\r\n\r\n\r\n\r\n\r\n\r\n @add_start_docstrings_to_model_forward(__UpperCAmelCase\t\t)\r\n @add_code_sample_docstrings(\r\n checkpoint=_CHECKPOINT_FOR_DOC\t, output_type=__UpperCAmelCase\t, config_class=_CONFIG_FOR_DOC\t, modality=\"vision\"\t, expected_output=_EXPECTED_OUTPUT_SHAPE\t, )\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tDict\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tTensor\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[bool] = None\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[bool] = None\t\t)\t\t\t\t\t\t-> BaseModelOutputWithPoolingAndNoAttention:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= (\r\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\r\n )\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.embedder(__UpperCAmelCase\t\t)\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.encoder(\r\n __UpperCAmelCase\t, output_hidden_states=__UpperCAmelCase\t, return_dict=__UpperCAmelCase\t\t)\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= encoder_outputs[0]\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.pooler(__UpperCAmelCase\t\t)\r\n\r\n if not return_dict:\r\n return (last_hidden_state, pooled_output) + encoder_outputs[1:]\r\n\r\n return BaseModelOutputWithPoolingAndNoAttention(\r\n last_hidden_state=__UpperCAmelCase\t, pooler_output=__UpperCAmelCase\t, hidden_states=encoder_outputs.hidden_states\t, )\r\n\r\n\r\n\r\n\r\n\r\n@add_start_docstrings(\r\n '\\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\\n ImageNet.\\n '\t\t, UpperCAmelCase_\t\t, )\r\n# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet\r\nclass \t\t\t\t\tA\t\t\t\t\t\t\t( UpperCAmelCase_\t\t\t\t\t):\r\n\r\n\r\n def __init__(self\t\t\t\t\t:\t\t\t\t\t\tUnion[str, Any]\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tstr\t\t)\t\t\t\t\t\t-> Tuple:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().__init__(__UpperCAmelCase\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= config.num_labels\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= RegNetModel(__UpperCAmelCase\t\t)\r\n # classification head\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= nn.Sequential(\r\n nn.Flatten()\t, nn.Linear(config.hidden_sizes[-1]\t, config.num_labels\t\t) if config.num_labels > 0 else nn.Identity()\t, )\r\n # initialize weights and apply final processing\r\n self.post_init()\r\n\r\n\r\n\r\n\r\n\r\n @add_start_docstrings_to_model_forward(__UpperCAmelCase\t\t)\r\n @add_code_sample_docstrings(\r\n checkpoint=_IMAGE_CLASS_CHECKPOINT\t, output_type=__UpperCAmelCase\t, config_class=_CONFIG_FOR_DOC\t, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT\t, )\r\n def lowercase_\t\t\t\t(self\t\t\t\t\t:\t\t\t\t\t\tint\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[torch.FloatTensor] = None\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[torch.LongTensor] = None\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[bool] = None\t, __UpperCAmelCase\t\t\t\t\t:\t\t\t\t\t\tOptional[bool] = None\t, )\t\t\t\t\t\t-> ImageClassifierOutputWithNoAttention:\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.regnet(__UpperCAmelCase\t, output_hidden_states=__UpperCAmelCase\t, return_dict=__UpperCAmelCase\t\t)\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= outputs.pooler_output if return_dict else outputs[1]\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= self.classifier(__UpperCAmelCase\t\t)\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= None\r\n\r\n if labels is not None:\r\n if self.config.problem_type is None:\r\n if self.num_labels == 1:\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= \"regression\"\r\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= \"single_label_classification\"\r\n else:\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= \"multi_label_classification\"\r\n if self.config.problem_type == \"regression\":\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= MSELoss()\r\n if self.num_labels == 1:\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= loss_fct(logits.squeeze()\t, labels.squeeze()\t\t)\r\n else:\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= loss_fct(__UpperCAmelCase\t, __UpperCAmelCase\t\t)\r\n elif self.config.problem_type == \"single_label_classification\":\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= CrossEntropyLoss()\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= loss_fct(logits.view(-1\t, self.num_labels\t\t)\t, labels.view(-1\t\t)\t\t)\r\n elif self.config.problem_type == \"multi_label_classification\":\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= BCEWithLogitsLoss()\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= loss_fct(__UpperCAmelCase\t, __UpperCAmelCase\t\t)\r\n\r\n if not return_dict:\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= (logits,) + outputs[2:]\r\n return (loss,) + output if loss is not None else output\r\n\r\n return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase\t, logits=__UpperCAmelCase\t, 
hidden_states=outputs.hidden_states\t\t)\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":143,"string":"143"},"style_context":{"kind":"string","value":"import argparse\r\nimport logging\r\nimport os\r\n\r\nimport datasets\r\nimport tensorflow as tf\r\n\r\nfrom transformers import AutoTokenizer\r\n\r\n\r\nUpperCamelCase__ \t\t\t\t\t\t\t= logging.getLogger(__name__)\r\n\r\n\r\n\r\n\r\ndef lowerCAmelCase_\t\t\t(\t\t\t\t\t)\t\t\t\t\t-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= argparse.ArgumentParser(\r\n description=\"Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.\"\t\t\t\t\t\t\t)\r\n parser.add_argument(\r\n \"--dataset_name\", type=__A, default=\"wikitext\", help=\"Name of the training. Explore datasets at: hf.co/datasets.\", )\r\n parser.add_argument(\r\n \"--dataset_config\", type=__A, default=\"wikitext-103-raw-v1\", help=\"Configuration name of the dataset.\"\t\t\t\t\t\t\t)\r\n parser.add_argument(\r\n \"--tokenizer_name_or_path\", type=__A, default=\"sayakpaul/unigram-tokenizer-wikitext\", help=\"Tokenizer identifier. Can be a local filepath or a Hub identifier.\", )\r\n parser.add_argument(\r\n \"--shard_size\", type=__A, default=1_000, help=\"Number of entries to go in a single shard.\", )\r\n parser.add_argument(\"--split\", type=__A, default=\"train\", choices=[\"train\", \"test\", \"validation\"]\t\t\t\t\t\t\t)\r\n parser.add_argument(\r\n \"--limit\", default=__A, type=__A, help=\"Limit the number of shards (used for debugging).\", )\r\n parser.add_argument(\r\n \"--max_length\", type=__A, default=512, help=\"Maximum sequence length. For training on TPUs, it helps to have a maximum\"\r\n \" sequence length that is a multiple of 8.\", )\r\n parser.add_argument(\r\n \"--output_dir\", default=\"tf-tpu\", type=__A, help=\"Output directory where the TFRecord shards will be saved. 
If the\"\r\n \" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord\"\r\n \" shards will be directly saved to a Google Cloud Storage bucket.\", )\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= parser.parse_args()\r\n return args\r\n\r\n\r\n\r\n\r\ndef lowerCAmelCase_\t\t\t(\t\t\t\t\t__A\t\t\t\t\t\t\t)\t\t\t\t\t-> Optional[int]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n def fn(__A\t\t\t\t\t\t\t):\r\n return tokenizer(examples[\"text\"]\t\t\t\t\t\t\t)\r\n\r\n return fn\r\n\r\n\r\n\r\n\r\ndef lowerCAmelCase_\t\t\t(\t\t\t\t\t__A\t\t\t\t\t\t\t)\t\t\t\t\t-> List[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= []\r\n for i in range(len(tokenized_data[\"input_ids\"]\t\t\t\t\t\t\t)\t\t\t\t\t\t\t):\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= {\r\n \"input_ids\": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data[\"input_ids\"][i]\t\t\t\t\t\t\t)\t\t\t\t\t\t\t),\r\n \"attention_mask\": tf.train.Feature(\r\n intaa_list=tf.train.IntaaList(value=tokenized_data[\"attention_mask\"][i]\t\t\t\t\t\t\t)\t\t\t\t\t\t\t),\r\n }\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= tf.train.Features(feature=__A\t\t\t\t\t\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= tf.train.Example(features=__A\t\t\t\t\t\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= example.SerializeToString()\r\n records.append(__A\t\t\t\t\t\t\t)\r\n return records\r\n\r\n\r\n\r\n\r\ndef lowerCAmelCase_\t\t\t(\t\t\t\t\t__A\t\t\t\t\t\t\t)\t\t\t\t\t-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split\t\t\t\t\t\t\t)\r\n\r\n if args.limit is not None:\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= min(len(__A\t\t\t\t\t\t\t), args.limit\t\t\t\t\t\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= dataset.select(range(__A\t\t\t\t\t\t\t)\t\t\t\t\t\t\t)\r\n print(f\"\"\"Limiting the dataset to {args.limit} entries.\"\"\"\t\t\t\t\t\t\t)\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= AutoTokenizer.from_pretrained(args.tokenizer_name_or_path\t\t\t\t\t\t\t)\r\n\r\n # Handle output directory creation.\r\n # For serializing into a Google Cloud Storage Bucket, one needs to first\r\n # create a bucket.\r\n if \"gs\" not in args.output_dir:\r\n if not os.path.exists(args.output_dir\t\t\t\t\t\t\t):\r\n os.makedirs(args.output_dir\t\t\t\t\t\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= os.path.join(args.output_dir, args.split\t\t\t\t\t\t\t)\r\n if not os.path.exists(__A\t\t\t\t\t\t\t):\r\n os.makedirs(__A\t\t\t\t\t\t\t)\r\n else:\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= os.path.join(args.output_dir, args.split\t\t\t\t\t\t\t)\r\n\r\n # Tokenize the whole dataset at once.\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= tokenize_function(__A\t\t\t\t\t\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= dataset.map(__A, batched=__A, num_proc=4, remove_columns=[\"text\"]\t\t\t\t\t\t\t)\r\n\r\n # We need to concatenate all our texts together, and then split the result\r\n # into chunks of a fixed size, which we will call block_size. To do this, we\r\n # will use the map method again, with the option batched=True. When we use batched=True,\r\n # the function we pass to map() will be passed multiple inputs at once, allowing us\r\n # to group them into more or fewer examples than we had in the input.\r\n # This allows us to create our new fixed-length samples. 
The advantage of this\r\n # method is that we don't lose a whole lot of content from the dataset compared to the\r\n # case where we simply tokenize with a pre-defined max_length.\r\n\r\n def group_texts(__A\t\t\t\t\t\t\t):\r\n # Concatenate all texts.\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= {k: sum(examples[k], []\t\t\t\t\t\t\t) for k in examples.keys()}\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= len(concatenated_examples[list(examples.keys()\t\t\t\t\t\t\t)[0]]\t\t\t\t\t\t\t)\r\n # We drop the small remainder, though you could add padding instead if the model supports it\r\n # In this, as in all things, we advise you to follow your heart 🫀\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= (total_length // args.max_length) * args.max_length\r\n # Split by chunks of max_len.\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= {\r\n k: [t[i : i + args.max_length] for i in range(0, __A, args.max_length\t\t\t\t\t\t\t)]\r\n for k, t in concatenated_examples.items()\r\n }\r\n return result\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= dataset_tokenized.map(__A, batched=__A, batch_size=1_000, num_proc=4\t\t\t\t\t\t\t)\r\n\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= 0\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= 0\r\n for shard in range(0, len(__A\t\t\t\t\t\t\t), args.shard_size\t\t\t\t\t\t\t):\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= grouped_dataset[shard : shard + args.shard_size]\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= len(dataset_snapshot[\"input_ids\"]\t\t\t\t\t\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= os.path.join(__A, f\"\"\"dataset-{shard_count}-{records_containing}.tfrecord\"\"\"\t\t\t\t\t\t\t)\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= get_serialized_examples(__A\t\t\t\t\t\t\t)\r\n\r\n with tf.io.TFRecordWriter(__A\t\t\t\t\t\t\t) as out_file:\r\n for i in range(len(__A\t\t\t\t\t\t\t)\t\t\t\t\t\t\t):\r\n UpperCAmelCase__\t\t\t\t\t\t\t\t= serialized_examples[i]\r\n out_file.write(__A\t\t\t\t\t\t\t)\r\n print(\"Wrote file {} containing {} records\".format(__A, __A\t\t\t\t\t\t\t)\t\t\t\t\t\t\t)\r\n\r\n shard_count += 1\r\n total_records += records_containing\r\n\r\n with open(f\"\"\"split-{args.split}-records-count.txt\"\"\", \"w\"\t\t\t\t\t\t\t) as f:\r\n print(f\"\"\"Total {args.split} records: {total_records}\"\"\", file=__A\t\t\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n UpperCamelCase__ \t\t\t\t\t\t\t= parse_args()\r\n main(args)\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":143,"string":"143"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":736,"cells":{"code":{"kind":"string","value":"\n\n\n\n\n\n\nfrom typing import TYPE_CHECKING\n\nfrom ...utils import (\n OptionalDependencyNotAvailable,\n _LazyModule,\n is_sentencepiece_available,\n is_tokenizers_available,\n is_torch_available,\n is_vision_available,\n)\n\n\nUpperCAmelCase_ :\t\t\t\t\t\tList[str] = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}\n\ntry:\n\tif not is_sentencepiece_available():\n\t\traise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n\tpass\nelse:\n\tUpperCAmelCase_ :\t\t\t\t\t\tint = ['''LayoutXLMTokenizer''']\n\ntry:\n\tif not is_tokenizers_available():\n\t\traise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n\tpass\nelse:\n\tUpperCAmelCase_ :\t\t\t\t\t\tOptional[int] = ['''LayoutXLMTokenizerFast''']\n\nif TYPE_CHECKING:\n\tfrom .processing_layoutxlm import LayoutXLMProcessor\n\n\ttry:\n\t\tif not is_sentencepiece_available():\n\t\t\traise OptionalDependencyNotAvailable()\n\texcept OptionalDependencyNotAvailable:\n\t\tpass\n\telse:\n\t\tfrom 
.tokenization_layoutxlm import LayoutXLMTokenizer\n\n\ttry:\n\t\tif not is_tokenizers_available():\n\t\t\traise OptionalDependencyNotAvailable()\n\texcept OptionalDependencyNotAvailable:\n\t\tpass\n\telse:\n\t\tfrom .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast\n\nelse:\n\timport sys\n\n\tUpperCAmelCase_ :\t\t\t\t\t\tUnion[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)\n"},"code_codestyle":{"kind":"number","value":38,"string":"38"},"style_context":{"kind":"string","value":"\n\n\n\n\n\n\ndef SCREAMING_SNAKE_CASE_\t\t\t\t\t\t\t(\t\t\t\t\t\t\t__magic_name__\t\t\t\t\t: list[list[int]]\t,\t\t\t__magic_name__\t\t\t\t\t: int\t,\t\t\t__magic_name__\t\t\t\t\t: int\t,\t\t\t__magic_name__\t\t\t\t\t: list[int]\t\t\t\t\t)\t\t->\t\t\tbool:\n\n\n\n\n\n\n\t\"\"\"simple docstring\"\"\"\n\n\n\n\tif graph[path[curr_ind - 1]][next_ver] == 0:\n\t\treturn False\n\n\t# 2. Validate that next vertex is not already in path\n\treturn not any(vertex == next_ver for vertex in path\t\t\t\t\t)\n\ndef SCREAMING_SNAKE_CASE_\t\t\t\t\t\t\t(\t\t\t\t\t\t\t__magic_name__\t\t\t\t\t: list[list[int]]\t,\t\t\t__magic_name__\t\t\t\t\t: list[int]\t,\t\t\t__magic_name__\t\t\t\t\t: int\t\t\t\t\t)\t\t->\t\t\tbool:\n\n\n\n\n\n\n\t\"\"\"simple docstring\"\"\"\n\n\n\n\tif curr_ind == len(__magic_name__\t\t\t\t\t):\n\t\t# return whether path exists between current and starting vertices\n\t\treturn graph[path[curr_ind - 1]][path[0]] == 1\n\n\t# Recursive Step\n\tfor next_ver in range(0\t,\t\t\tlen(__magic_name__\t\t\t\t\t)\t\t\t\t\t):\n\t\tif valid_connection(__magic_name__\t,\t\t\t__magic_name__\t,\t\t\t__magic_name__\t,\t\t\t__magic_name__\t\t\t\t\t):\n\t\t\t# Insert current vertex into path as next transition\n\t\t\tUpperCamelCase :str =\tnext_ver\n\t\t\t# Validate created path\n\t\t\tif util_hamilton_cycle(__magic_name__\t,\t\t\t__magic_name__\t,\t\t\tcurr_ind + 1\t\t\t\t\t):\n\t\t\t\treturn True\n\t\t\t# Backtrack\n\t\t\tUpperCamelCase :Union[str, Any] =\t-1\n\treturn False\n\ndef SCREAMING_SNAKE_CASE_\t\t\t\t\t\t\t(\t\t\t\t\t\t\t__magic_name__\t\t\t\t\t: list[list[int]]\t,\t\t\t__magic_name__\t\t\t\t\t: int = 0\t\t\t\t\t)\t\t->\t\t\tlist[int]:\n\n\n\n\n\n\n\t\"\"\"simple docstring\"\"\"\n\n\n\n\tUpperCamelCase :Union[str, Any] =\t[-1] * (len(__magic_name__\t\t\t\t\t) + 1)\n\t# initialize start and end of path with starting index\n\tUpperCamelCase :Any =\tstart_index\n\t# evaluate and if we find answer return path either return empty array\n\treturn path if util_hamilton_cycle(__magic_name__\t,\t\t\t__magic_name__\t,\t\t\t1\t\t\t\t\t) else []\n"},"style_context_codestyle":{"kind":"number","value":38,"string":"38"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":737,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\nimport itertools\r\nimport os\r\nfrom collections import Counter, defaultdict\r\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\r\n\r\nimport numpy as np\r\n\r\nimport datasets\r\n\r\nfrom .execute import check_correctness\r\n\r\n\r\nUpperCamelCase\t\t\t\t\t\t\t\t=\t\t\t\t\t\"\\\\n@misc{chen2021evaluating,\\n title={Evaluating Large Language Models Trained on Code},\\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\\\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\\\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\\\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\\\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray 
\\\\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\\\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\\\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\\\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\\\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\\\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\\\nand William Saunders and Christopher Hesse and Andrew N. Carr \\\\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\\\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\\\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\\\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\\n year={2021},\\n eprint={2107.03374},\\n archivePrefix={arXiv},\\n primaryClass={cs.LG}\\n}\\n\"\r\n\r\nUpperCamelCase\t\t\t\t\t\t\t\t=\t\t\t\t\t\"\\\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\\ndescribed in the paper \\\"Evaluating Large Language Models Trained on Code\\\"\\n(https://arxiv.org/abs/2107.03374).\\n\"\r\n\r\n\r\nUpperCamelCase\t\t\t\t\t\t\t\t=\t\t\t\t\t\"\\nCalculates how good are predictions given some references, using certain scores\\nArgs:\\n predictions: list of candidates to evaluate. Each candidates should be a list\\n of strings with several code candidates to solve the problem.\\n references: a list with a test for each prediction. Each test should evaluate the\\n correctness of a code candidate.\\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\\n timeout:\\nReturns:\\n pass_at_k: dict with pass rates for each k\\n results: dict with granular results of each unittest\\nExamples:\\n >>> code_eval = datasets.load_metric(\\\"code_eval\\\")\\n >>> test_cases = [\\\"assert add(2,3)==5\\\"]\\n >>> candidates = [[\\\"def add(a,b): return a*b\\\", \\\"def add(a, b): return a+b\\\"]]\\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\\n >>> print(pass_at_k)\\n {'pass@1': 0.5, 'pass@2': 1.0}\\n\"\r\n\r\n\r\nUpperCamelCase\t\t\t\t\t\t\t\t=\t\t\t\t\t\"\\n################################################################################\\n !!!WARNING!!!\\n################################################################################\\nThe \\\"code_eval\\\" metric executes untrusted model-generated code in Python.\\nAlthough it is highly unlikely that model-generated code will do something\\novertly malicious in response to this test suite, model-generated code may act\\ndestructively due to a lack of model capability or alignment.\\nUsers are strongly encouraged to sandbox this evaluation suite so that it\\ndoes not perform destructive actions on their host or network. For more\\ninformation on how OpenAI sandboxes its code, see the paper \\\"Evaluating Large\\nLanguage Models Trained on Code\\\" (https://arxiv.org/abs/2107.03374).\\n\\nOnce you have read this disclaimer and taken appropriate precautions,\\nset the environment variable HF_ALLOW_CODE_EVAL=\\\"1\\\". 
Within Python you can to this\\nwith:\\n\\n>>> import os\\n>>> os.environ[\\\"HF_ALLOW_CODE_EVAL\\\"] = \\\"1\\\"\\n\\n################################################################################\\\\n\"\r\n\r\nUpperCamelCase\t\t\t\t\t\t\t\t=\t\t\t\t\t\"The MIT License\\n\\nCopyright (c) OpenAI (https://openai.com)\\n\\nPermission is hereby granted, free of charge, to any person obtaining a copy\\nof this software and associated documentation files (the \\\"Software\\\"), to deal\\nin the Software without restriction, including without limitation the rights\\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\\ncopies of the Software, and to permit persons to whom the Software is\\nfurnished to do so, subject to the following conditions:\\n\\nThe above copyright notice and this permission notice shall be included in\\nall copies or substantial portions of the Software.\\n\\nTHE SOFTWARE IS PROVIDED \\\"AS IS\\\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\\nTHE SOFTWARE.\"\r\n\r\n\r\n\r\n\r\n\r\n@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )\r\nclass \t\t\t__lowerCamelCase\t( datasets.Metric ):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tOptional[int] )\t\t\t\t->\tUnion[str, Any]:\r\n return datasets.MetricInfo(\r\n # This is the description that will appear on the metrics page.\r\n description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(\r\n {\r\n \"predictions\": datasets.Sequence(datasets.Value(\"string\" ) ),\r\n \"references\": datasets.Value(\"string\" ),\r\n } ) , homepage=\"https://github.com/openai/human-eval\" , codebase_urls=[\"https://github.com/openai/human-eval\"] , reference_urls=[\"https://github.com/openai/human-eval\"] , license=_LICENSE , )\r\n\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tUnion[str, Any] , SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tOptional[Any] , SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tAny , SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tUnion[str, Any]=[1, 10, 100] , SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tList[str]=4 , SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tUnion[str, Any]=3.0 )\t\t\t\t->\tUnion[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n if os.getenv(\"HF_ALLOW_CODE_EVAL\" , 0 ) != \"1\":\r\n raise ValueError(_WARNING )\r\n\r\n if os.name == \"nt\":\r\n raise NotImplementedError(\"This metric is currently not supported on Windows.\" )\r\n\r\n with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE__ ) as executor:\r\n lowerCAmelCase__ =\t\t\t[]\r\n lowerCAmelCase__ =\t\t\tCounter()\r\n lowerCAmelCase__ =\t\t\t0\r\n lowerCAmelCase__ =\t\t\tdefaultdict(SCREAMING_SNAKE_CASE__ )\r\n\r\n for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):\r\n for candidate in candidates:\r\n lowerCAmelCase__ =\t\t\tcandidate + \"\\n\" + test_case\r\n lowerCAmelCase__ =\t\t\t(test_program, timeout, task_id, completion_id[task_id])\r\n lowerCAmelCase__ =\t\t\texecutor.submit(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )\r\n 
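        # Editorial note: each submitted job runs `check_correctness` (imported
        # from .execute at the top of this file) on `candidate + "\n" + test_case`,
        # i.e. the model completion concatenated with its unit test, bounded by
        # `timeout`. The (task_id, completion_id) pair tags each result so that
        # per-problem pass counts can be aggregated into pass@k once all the
        # futures complete below.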
futures.append(SCREAMING_SNAKE_CASE__ )\r\n completion_id[task_id] += 1\r\n n_samples += 1\r\n\r\n for future in as_completed(SCREAMING_SNAKE_CASE__ ):\r\n lowerCAmelCase__ =\t\t\tfuture.result()\r\n results[result[\"task_id\"]].append((result[\"completion_id\"], result) )\r\n\r\n lowerCAmelCase__ ,\t\tlowerCAmelCase__ =\t\t\t[], []\r\n for result in results.values():\r\n result.sort()\r\n lowerCAmelCase__ =\t\t\t[r[1][\"passed\"] for r in result]\r\n total.append(len(SCREAMING_SNAKE_CASE__ ) )\r\n correct.append(sum(SCREAMING_SNAKE_CASE__ ) )\r\n lowerCAmelCase__ =\t\t\tnp.array(SCREAMING_SNAKE_CASE__ )\r\n lowerCAmelCase__ =\t\t\tnp.array(SCREAMING_SNAKE_CASE__ )\r\n\r\n lowerCAmelCase__ =\t\t\tk\r\n lowerCAmelCase__ =\t\t\t{f'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).mean() for k in ks if (total >= k).all()}\r\n\r\n return pass_at_k, results\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t\t_A (\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t: Any , lowerCAmelCase_\t\t\t\t\t: str , lowerCAmelCase_\t\t\t\t\t: Tuple ):\r\n \"\"\"simple docstring\"\"\"\r\n\r\n def estimator(lowerCAmelCase_\t\t\t\t\t: int , lowerCAmelCase_\t\t\t\t\t: int , lowerCAmelCase_\t\t\t\t\t: int ) -> float:\r\n if n - c < k:\r\n return 1.0\r\n return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )\r\n\r\n if isinstance(lowerCamelCase__ , lowerCamelCase__ ):\r\n lowerCAmelCase__ =\t\t\titertools.repeat(lowerCamelCase__ , len(lowerCamelCase__ ) )\r\n else:\r\n assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )\r\n lowerCAmelCase__ =\t\t\titer(lowerCamelCase__ )\r\n\r\n return np.array([estimator(int(lowerCamelCase__ ) , int(lowerCamelCase__ ) , lowerCamelCase__ ) for n, c in zip(lowerCamelCase__ , lowerCamelCase__ )] )\r\n"},"code_codestyle":{"kind":"number","value":350,"string":"350"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\nimport inspect\r\nimport unittest\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom accelerate.hooks import (\r\n AlignDevicesHook,\r\n ModelHook,\r\n SequentialHook,\r\n add_hook_to_module,\r\n attach_align_device_hook,\r\n remove_hook_from_module,\r\n remove_hook_from_submodules,\r\n)\r\nfrom accelerate.test_utils import require_multi_gpu\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t__lowerCamelCase\t( nn.Module ):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n def __init__(\tself\t\t\t\t\t:\t\t\t\t\tDict )\t\t\t\t->\tOptional[int]:\r\n super().__init__()\r\n lowerCAmelCase__ =\t\t\tnn.Linear(3 , 4 )\r\n lowerCAmelCase__ =\t\t\tnn.BatchNormad(4 )\r\n lowerCAmelCase__ =\t\t\tnn.Linear(4 , 5 )\r\n\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tUnion[str, Any] , SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tint )\t\t\t\t->\tList[Any]:\r\n return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE__ ) ) )\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t__lowerCamelCase\t( UpperCamelCase__ ):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tAny , SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tstr , *SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tTuple , **SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tAny )\t\t\t\t->\tUnion[str, Any]:\r\n return (args[0] + 1,) + args[1:], kwargs\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t__lowerCamelCase\t( UpperCamelCase__ ):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tList[Any] , SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tTuple , SCREAMING_SNAKE_CASE__\t\t\t\t\t:\t\t\t\t\tstr 
)\t\t\t\t->\tDict:\r\n return output + 1\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t__lowerCamelCase\t( unittest.TestCase ):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tList[str] )\t\t\t\t->\tTuple:\r\n lowerCAmelCase__ =\t\t\tModelForTest()\r\n lowerCAmelCase__ =\t\t\tModelHook()\r\n\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(test_model._hf_hook , SCREAMING_SNAKE_CASE__ )\r\n self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , \"_old_forward\" ) )\r\n\r\n # Check adding the hook did not change the name or the signature\r\n self.assertEqual(test_model.forward.__name__ , \"forward\" )\r\n self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , [\"x\"] )\r\n\r\n remove_hook_from_module(SCREAMING_SNAKE_CASE__ )\r\n self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , \"_hf_hook\" ) )\r\n self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , \"_old_forward\" ) )\r\n\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tUnion[str, Any] )\t\t\t\t->\tint:\r\n lowerCAmelCase__ =\t\t\tModelForTest()\r\n lowerCAmelCase__ =\t\t\tModelHook()\r\n\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , append=SCREAMING_SNAKE_CASE__ )\r\n\r\n self.assertEqual(isinstance(test_model._hf_hook , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )\r\n self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , \"_old_forward\" ) )\r\n\r\n # Check adding the hook did not change the name or the signature\r\n self.assertEqual(test_model.forward.__name__ , \"forward\" )\r\n self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , [\"x\"] )\r\n\r\n remove_hook_from_module(SCREAMING_SNAKE_CASE__ )\r\n self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , \"_hf_hook\" ) )\r\n self.assertFalse(hasattr(SCREAMING_SNAKE_CASE__ , \"_old_forward\" ) )\r\n\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tList[str] )\t\t\t\t->\tAny:\r\n lowerCAmelCase__ =\t\t\tModelForTest()\r\n lowerCAmelCase__ =\t\t\ttorch.randn(2 , 3 )\r\n lowerCAmelCase__ =\t\t\ttest_model(x + 1 )\r\n lowerCAmelCase__ =\t\t\ttest_model(x + 2 )\r\n\r\n lowerCAmelCase__ =\t\t\tPreForwardHook()\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )\r\n lowerCAmelCase__ =\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )\r\n\r\n # Attaching a hook to a model when it already has one replaces, does not chain\r\n lowerCAmelCase__ =\t\t\tPreForwardHook()\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )\r\n lowerCAmelCase__ =\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 ) )\r\n\r\n # You need to use the sequential hook to chain two or more hooks\r\n lowerCAmelCase__ =\t\t\tSequentialHook(PreForwardHook() , PreForwardHook() )\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )\r\n\r\n lowerCAmelCase__ =\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-5 )\r\n\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tAny )\t\t\t\t->\tUnion[str, Any]:\r\n lowerCAmelCase__ =\t\t\tModelForTest()\r\n lowerCAmelCase__ =\t\t\ttorch.randn(2 , 3 )\r\n lowerCAmelCase__ 
=\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n\r\n lowerCAmelCase__ =\t\t\tPostForwardHook()\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )\r\n lowerCAmelCase__ =\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1e-5 ) )\r\n\r\n # Attaching a hook to a model when it already has one replaces, does not chain\r\n lowerCAmelCase__ =\t\t\tPostForwardHook()\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )\r\n lowerCAmelCase__ =\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 , atol=1e-5 ) )\r\n\r\n # You need to use the sequential hook to chain two or more hooks\r\n lowerCAmelCase__ =\t\t\tSequentialHook(PostForwardHook() , PostForwardHook() )\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )\r\n\r\n lowerCAmelCase__ =\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n assert torch.allclose(SCREAMING_SNAKE_CASE__ , output + 2 , atol=1e-5 )\r\n\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tOptional[int] )\t\t\t\t->\tint:\r\n lowerCAmelCase__ =\t\t\tModelForTest()\r\n lowerCAmelCase__ =\t\t\ttorch.randn(2 , 3 )\r\n lowerCAmelCase__ =\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n\r\n lowerCAmelCase__ =\t\t\tPostForwardHook()\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )\r\n lowerCAmelCase__ =\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , output + 1 ) )\r\n self.assertTrue(outputa.requires_grad )\r\n\r\n lowerCAmelCase__ =\t\t\tTrue\r\n lowerCAmelCase__ =\t\t\ttest_model(SCREAMING_SNAKE_CASE__ )\r\n self.assertFalse(outputa.requires_grad )\r\n\r\n\r\n @require_multi_gpu\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tOptional[Any] )\t\t\t\t->\tList[str]:\r\n lowerCAmelCase__ =\t\t\tModelForTest()\r\n # Everything is on CPU\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n\r\n # This will move each submodule on different devices\r\n add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )\r\n add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )\r\n add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )\r\n\r\n self.assertEqual(model.lineara.weight.device , torch.device(0 ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )\r\n self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(1 ) )\r\n\r\n # We can still make a forward pass. 
The input does not need to be on any particular device\r\n lowerCAmelCase__ =\t\t\ttorch.randn(2 , 3 )\r\n lowerCAmelCase__ =\t\t\tmodel(SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(output.device , torch.device(1 ) )\r\n\r\n # We can add a general hook to put back output on same device as input.\r\n add_hook_to_module(SCREAMING_SNAKE_CASE__ , AlignDevicesHook(io_same_device=SCREAMING_SNAKE_CASE__ ) )\r\n lowerCAmelCase__ =\t\t\ttorch.randn(2 , 3 ).to(0 )\r\n lowerCAmelCase__ =\t\t\tmodel(SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(output.device , torch.device(0 ) )\r\n\r\n\r\n def a\t\t\t\t(\tself\t\t\t\t\t:\t\t\t\t\tList[str] )\t\t\t\t->\tList[str]:\r\n lowerCAmelCase__ =\t\t\tModelForTest()\r\n\r\n # Everything is on CPU\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n\r\n # This will move each submodule on different devices\r\n lowerCAmelCase__ =\t\t\t{\"execution_device\": 0 if torch.cuda.is_available() else \"cpu\", \"offload\": True}\r\n\r\n add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )\r\n add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )\r\n add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )\r\n\r\n # Parameters have been offloaded, so on the meta device\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"meta\" ) )\r\n # Buffers are not included in the offload by default, so are on the execution device\r\n lowerCAmelCase__ =\t\t\ttorch.device(hook_kwargs[\"execution_device\"] )\r\n self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )\r\n\r\n lowerCAmelCase__ =\t\t\ttorch.randn(2 , 3 )\r\n lowerCAmelCase__ =\t\t\tmodel(SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )\r\n\r\n # Removing hooks loads back the weights in the model.\r\n remove_hook_from_module(model.lineara )\r\n remove_hook_from_module(model.batchnorm )\r\n remove_hook_from_module(model.lineara )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n\r\n # Now test with buffers included in the offload\r\n lowerCAmelCase__ =\t\t\t{\r\n \"execution_device\": 0 if torch.cuda.is_available() else \"cpu\",\r\n \"offload\": True,\r\n \"offload_buffers\": True,\r\n }\r\n\r\n add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )\r\n add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )\r\n add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE__ ) )\r\n\r\n # Parameters have been offloaded, so on the meta device, buffers included\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.batchnorm.running_mean.device , torch.device(\"meta\" ) )\r\n\r\n lowerCAmelCase__ =\t\t\ttorch.randn(2 , 3 )\r\n lowerCAmelCase__ =\t\t\tmodel(SCREAMING_SNAKE_CASE__ )\r\n 
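        # Editorial note: with offloading enabled (here including buffers), the
        # AlignDevicesHook streams each submodule's weights from their offloaded
        # CPU copies onto the execution device just before that submodule's
        # forward call and returns them to the meta device afterwards, so the
        # whole model is never resident at once; the assertion below checks that
        # the final output still lands on the execution device.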
        self.assertEqual(output.device, SCREAMING_SNAKE_CASE__)

        # Removing hooks loads the weights back into the model.
        remove_hook_from_module(model.lineara)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.lineara)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

        # This will move each submodule to a different device
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so they are on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so they stay on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads the weights back into the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so they are on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads the weights back into the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
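        # (Sketch for orientation, not executed here: the `weights_map` variant used
        #  below gives the hook an explicit store to reload offloaded parameters from.
        #  The keyword names are the real `accelerate.hooks` API; the `0` device id is
        #  an assumption for a single-GPU box.
        #
        #      from accelerate.hooks import attach_align_device_hook
        #      attach_align_device_hook(
        #          model,
        #          execution_device=0,              # device the forward actually runs on
        #          offload=True,                    # park parameters on the meta device
        #          weights_map=model.state_dict(),  # explicit source for reloading weights
        #      )
        #  )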
self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n\r\n # This will move each submodule on different devices\r\n lowerCAmelCase__ =\t\t\t0 if torch.cuda.is_available() else \"cpu\"\r\n attach_align_device_hook(\r\n SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() )\r\n\r\n # Parameters have been offloaded, so on the meta device\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"meta\" ) )\r\n # Buffers are not included in the offload by default, so are on the execution device\r\n lowerCAmelCase__ =\t\t\ttorch.device(SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE__ )\r\n\r\n lowerCAmelCase__ =\t\t\ttorch.randn(2 , 3 )\r\n lowerCAmelCase__ =\t\t\tmodel(SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )\r\n\r\n # Removing hooks loads back the weights in the model.\r\n remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n\r\n # Now test with buffers included in the offload\r\n attach_align_device_hook(\r\n SCREAMING_SNAKE_CASE__ , execution_device=SCREAMING_SNAKE_CASE__ , offload=SCREAMING_SNAKE_CASE__ , weights_map=model.state_dict() , offload_buffers=SCREAMING_SNAKE_CASE__ , )\r\n\r\n # Parameters have been offloaded, so on the meta device, buffers included\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"meta\" ) )\r\n self.assertEqual(model.batchnorm.running_mean.device , torch.device(\"meta\" ) )\r\n\r\n lowerCAmelCase__ =\t\t\ttorch.randn(2 , 3 )\r\n lowerCAmelCase__ =\t\t\tmodel(SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(output.device , SCREAMING_SNAKE_CASE__ )\r\n\r\n # Removing hooks loads back the weights in the model.\r\n remove_hook_from_submodules(SCREAMING_SNAKE_CASE__ )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.batchnorm.weight.device , torch.device(\"cpu\" ) )\r\n self.assertEqual(model.lineara.weight.device , torch.device(\"cpu\" ) )\r\n"},"style_context_codestyle":{"kind":"number","value":221,"string":"221"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":738,"cells":{"code":{"kind":"string","value":"\r\n\r\n# Copyright 2023 The HuggingFace Team. 
All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nfrom typing import TYPE_CHECKING\r\n\r\n# rely on isort to merge the imports\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available\r\n\r\n\r\na\t\t={\"\"\"configuration_mra\"\"\": [\"\"\"MRA_PRETRAINED_CONFIG_ARCHIVE_MAP\"\"\", \"\"\"MraConfig\"\"\"]}\r\n\r\ntry:\r\n\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\tpass\r\nelse:\r\n\t\t\t\t\ta\t\t=[\r\n\t\t\t\t\t \"\"\"MRA_PRETRAINED_MODEL_ARCHIVE_LIST\"\"\",\r\n\t\t\t\t\t \"\"\"MraForMaskedLM\"\"\",\r\n\t\t\t\t\t \"\"\"MraForMultipleChoice\"\"\",\r\n\t\t\t\t\t \"\"\"MraForQuestionAnswering\"\"\",\r\n\t\t\t\t\t \"\"\"MraForSequenceClassification\"\"\",\r\n\t\t\t\t\t \"\"\"MraForTokenClassification\"\"\",\r\n\t\t\t\t\t \"\"\"MraLayer\"\"\",\r\n\t\t\t\t\t \"\"\"MraModel\"\"\",\r\n\t\t\t\t\t \"\"\"MraPreTrainedModel\"\"\",\r\n\t\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\t\tfrom .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig\r\n\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tfrom .modeling_mra import (\r\n\t\t\t\t\t\t\t\t\t\t MRA_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t\t\t MraForMaskedLM,\r\n\t\t\t\t\t\t\t\t\t\t MraForMultipleChoice,\r\n\t\t\t\t\t\t\t\t\t\t MraForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t\t\t MraForSequenceClassification,\r\n\t\t\t\t\t\t\t\t\t\t MraForTokenClassification,\r\n\t\t\t\t\t\t\t\t\t\t MraLayer,\r\n\t\t\t\t\t\t\t\t\t\t MraModel,\r\n\t\t\t\t\t\t\t\t\t\t MraPreTrainedModel,\r\n\t\t\t\t\t\t\t\t\t\t)\r\nelse:\r\n\t\t\t\t\timport sys\r\n\r\n\t\t\t\t\ta\t\t=_LazyModule(__name__, globals()[\"\"\"__file__\"\"\"], _import_structure)\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":73,"string":"73"},"style_context":{"kind":"string","value":"\n\n\n\nlowerCAmelCase__ = 0 # The first color of the flag.\nlowerCAmelCase__ = 1 # The second color of the flag.\nlowerCAmelCase__ = 2 # The third color of the flag.\nlowerCAmelCase__ = (red, white, blue)\n\n\ndef \t\t\t\t\t__lowerCamelCase\t\t\t\t\t( lowerCamelCase__\t\t\t\t\t\t):\n\n\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\t\t\t\tif not sequence:\n\t\t\t\t\t\t\t\treturn []\n\t\t\t\tif len(lowerCamelCase__\t\t\t\t\t\t) == 1:\n\t\t\t\t\t\t\t\treturn list(lowerCamelCase__\t\t\t\t\t\t)\n\t\t\t\tlowercase__ : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t= 0\n\t\t\t\tlowercase__ : Any\t\t\t\t\t\t\t\t\t\t\t\t\t= len(lowerCamelCase__\t\t\t\t\t\t) - 1\n\t\t\t\tlowercase__ : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t= 0\n\t\t\t\twhile mid <= high:\n\t\t\t\t\t\t\t\tif sequence[mid] == colors[0]:\n\t\t\t\t\t\t\t\t\t\t\t\tlowercase__\t\t\t\t\t\t\t,\t\t\t\t\t\t\tlowercase__ : 
int\t\t\t\t\t\t\t\t\t\t\t\t\t= sequence[mid], sequence[low]\n\t\t\t\t\t\t\t\t\t\t\t\tlow += 1\n\t\t\t\t\t\t\t\t\t\t\t\tmid += 1\n\t\t\t\t\t\t\t\telif sequence[mid] == colors[1]:\n\t\t\t\t\t\t\t\t\t\t\t\tmid += 1\n\t\t\t\t\t\t\t\telif sequence[mid] == colors[2]:\n\t\t\t\t\t\t\t\t\t\t\t\tlowercase__\t\t\t\t\t\t\t,\t\t\t\t\t\t\tlowercase__ : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t= sequence[high], sequence[mid]\n\t\t\t\t\t\t\t\t\t\t\t\thigh -= 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tlowercase__ : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t= F\"\"\"The elements inside the sequence must contains only {colors} values\"\"\"\n\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(lowerCamelCase__\t\t\t\t\t\t)\n\t\t\t\treturn sequence\n\n\nif __name__ == \"__main__\":\n\t\t\t\timport doctest\n\n\t\t\t\tdoctest.testmod()\n\n\t\t\t\tlowerCAmelCase__ = input('''Enter numbers separated by commas:\\n''').strip()\n\t\t\t\tlowerCAmelCase__ = [int(item.strip()) for item in user_input.split(''',''')]\n\t\t\t\tprint(f'''{dutch_national_flag_sort(unsorted)}''')\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":130,"string":"130"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":739,"cells":{"code":{"kind":"string","value":"import copy\nfrom collections import OrderedDict\nfrom typing import Mapping\n\nfrom packaging import version\n\nfrom ...configuration_utils import PretrainedConfig\nfrom ...onnx import OnnxConfig\nfrom ...utils import logging\nfrom ..auto import CONFIG_MAPPING\n\n\nlowerCamelCase__ =\t\t\tlogging.get_logger(__name__)\n\nlowerCamelCase__ =\t\t\t{\n '''microsoft/conditional-detr-resnet-50''': (\n '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''\n ),\n}\n\n\n\n\nclass \t\t\t_UpperCAmelCase\t( lowerCAmelCase ):\n\n\n\n '''simple docstring'''\n\n\n\n\n __A\t =\t\t\t'''conditional_detr'''\n __A\t =\t\t\t['''past_key_values''']\n __A\t =\t\t\t{\n '''hidden_size''': '''d_model''',\n '''num_attention_heads''': '''encoder_attention_heads''',\n }\n\n\n\n\n\n\n\n def __init__(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Tuple ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Optional[int]=True ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[Any]=None ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Union[str, Any]=3 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Optional[Any]=300 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[str]=6 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Union[str, Any]=2048 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Any=8 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Dict=6 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Tuple=2048 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Union[str, Any]=8 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Optional[int]=0.0 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: str=0.0 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Tuple=True ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Any=\"relu\" ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Any=256 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: str=0.1 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Dict=0.0 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[Any]=0.0 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Tuple=0.02 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Optional[int]=1.0 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Tuple=False ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Dict=\"sine\" ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[Any]=\"resnet50\" ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: str=True ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[Any]=False ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[str]=2 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Tuple=5 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Dict=2 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[str]=1 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Any=1 
,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Any=2 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: int=5 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: str=2 ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Tuple=0.25 ,\t\t\t\t**lowercase_\t\t\t\t\t\t\t: Tuple ,\t\t\t\t)\t\t->\t\t\t\t\tDict:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n if backbone_config is not None and use_timm_backbone:\n raise ValueError(\"You can't specify both `backbone_config` and `use_timm_backbone`.\")\n\n if not use_timm_backbone:\n if backbone_config is None:\n logger.info(\"`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.\")\n _UpperCamelCase = CONFIG_MAPPING[\"resnet\"](out_features=[\"stage4\"])\n elif isinstance(lowercase_ ,\t\t\t\tlowercase_):\n _UpperCamelCase = backbone_config.get(\"model_type\")\n _UpperCamelCase = CONFIG_MAPPING[backbone_model_type]\n _UpperCamelCase = config_class.from_dict(lowercase_)\n\n _UpperCamelCase = use_timm_backbone\n _UpperCamelCase = backbone_config\n _UpperCamelCase = num_channels\n _UpperCamelCase = num_queries\n _UpperCamelCase = d_model\n _UpperCamelCase = encoder_ffn_dim\n _UpperCamelCase = encoder_layers\n _UpperCamelCase = encoder_attention_heads\n _UpperCamelCase = decoder_ffn_dim\n _UpperCamelCase = decoder_layers\n _UpperCamelCase = decoder_attention_heads\n _UpperCamelCase = dropout\n _UpperCamelCase = attention_dropout\n _UpperCamelCase = activation_dropout\n _UpperCamelCase = activation_function\n _UpperCamelCase = init_std\n _UpperCamelCase = init_xavier_std\n _UpperCamelCase = encoder_layerdrop\n _UpperCamelCase = decoder_layerdrop\n _UpperCamelCase = encoder_layers\n _UpperCamelCase = auxiliary_loss\n _UpperCamelCase = position_embedding_type\n _UpperCamelCase = backbone\n _UpperCamelCase = use_pretrained_backbone\n _UpperCamelCase = dilation\n # Hungarian matcher\n _UpperCamelCase = class_cost\n _UpperCamelCase = bbox_cost\n _UpperCamelCase = giou_cost\n # Loss coefficients\n _UpperCamelCase = mask_loss_coefficient\n _UpperCamelCase = dice_loss_coefficient\n _UpperCamelCase = cls_loss_coefficient\n _UpperCamelCase = bbox_loss_coefficient\n _UpperCamelCase = giou_loss_coefficient\n _UpperCamelCase = focal_alpha\n super().__init__(is_encoder_decoder=lowercase_ ,\t\t\t\t**lowercase_)\n\n\n\n\n\n\n\n @property\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Dict)\t\t->\t\t\t\t\tint:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n return self.encoder_attention_heads\n\n\n\n\n\n\n\n @property\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: List[Any])\t\t->\t\t\t\t\tint:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n return self.d_model\n\n\n\n\n\n\n\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Optional[int])\t\t->\t\t\t\t\tOptional[int]:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n _UpperCamelCase = copy.deepcopy(self.__dict__)\n if self.backbone_config is not None:\n _UpperCamelCase = self.backbone_config.to_dict()\n _UpperCamelCase = self.__class__.model_type\n return output\n\n\n\n\nclass \t\t\t_UpperCAmelCase\t( lowerCAmelCase ):\n\n\n\n '''simple docstring'''\n\n\n\n\n __A\t =\t\t\tversion.parse('''1.11''' )\n\n\n\n\n\n\n\n @property\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Union[str, Any])\t\t->\t\t\t\t\tMapping[str, Mapping[int, str]]:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n return OrderedDict(\n [\n (\"pixel_values\", {0: \"batch\", 1: \"num_channels\", 2: \"height\", 3: \"width\"}),\n 
(\"pixel_mask\", {0: \"batch\"}),\n ])\n\n\n\n\n\n\n\n @property\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Optional[int])\t\t->\t\t\t\t\tfloat:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n return 1e-5\n\n\n\n\n\n\n\n @property\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Union[str, Any])\t\t->\t\t\t\t\tint:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n return 12\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":63,"string":"63"},"style_context":{"kind":"string","value":"from __future__ import annotations\n\nimport random\nimport unittest\n\nfrom transformers import TransfoXLConfig, is_tf_available\nfrom transformers.testing_utils import require_tf, slow\n\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor\nfrom ...test_pipeline_mixin import PipelineTesterMixin\n\n\nif is_tf_available():\n import tensorflow as tf\n\n from transformers import (\n TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,\n TFTransfoXLForSequenceClassification,\n TFTransfoXLLMHeadModel,\n TFTransfoXLModel,\n )\n\n\n\n\nclass \t\t\t_UpperCAmelCase\t:\n\n\n\n '''simple docstring'''\n\n\n\n\n def __init__(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Optional[Any] ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Optional[Any] ,\t\t\t\t)\t\t->\t\t\t\t\tOptional[Any]:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n _UpperCamelCase = parent\n _UpperCamelCase = 13\n _UpperCamelCase = 7\n _UpperCamelCase = 30\n _UpperCamelCase = self.seq_length + self.mem_len\n _UpperCamelCase = 15\n _UpperCamelCase = True\n _UpperCamelCase = True\n _UpperCamelCase = 99\n _UpperCamelCase = [10, 50, 80]\n _UpperCamelCase = 32\n _UpperCamelCase = 32\n _UpperCamelCase = 4\n _UpperCamelCase = 8\n _UpperCamelCase = 128\n _UpperCamelCase = 2\n _UpperCamelCase = 2\n _UpperCamelCase = None\n _UpperCamelCase = 1\n _UpperCamelCase = 0\n _UpperCamelCase = 3\n _UpperCamelCase = self.vocab_size - 1\n _UpperCamelCase = 0.01\n\n\n\n\n\n\n\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Dict)\t\t->\t\t\t\t\tOptional[int]:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] ,\t\t\t\tself.vocab_size)\n _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] ,\t\t\t\tself.vocab_size)\n\n _UpperCamelCase = None\n if self.use_labels:\n _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] ,\t\t\t\tself.vocab_size)\n\n _UpperCamelCase = TransfoXLConfig(\n vocab_size=self.vocab_size ,\t\t\t\tmem_len=self.mem_len ,\t\t\t\tclamp_len=self.clamp_len ,\t\t\t\tcutoffs=self.cutoffs ,\t\t\t\td_model=self.hidden_size ,\t\t\t\td_embed=self.d_embed ,\t\t\t\tn_head=self.num_attention_heads ,\t\t\t\td_head=self.d_head ,\t\t\t\td_inner=self.d_inner ,\t\t\t\tdiv_val=self.div_val ,\t\t\t\tn_layer=self.num_hidden_layers ,\t\t\t\teos_token_id=self.eos_token_id ,\t\t\t\tpad_token_id=self.vocab_size - 1 ,\t\t\t\tinit_range=self.init_range ,\t\t\t\tnum_labels=self.num_labels ,\t\t\t\t)\n\n return (config, input_ids_a, input_ids_a, lm_labels)\n\n\n\n\n\n\n\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Union[str, Any])\t\t->\t\t\t\t\tTuple:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n random.seed(self.seed)\n tf.random.set_seed(self.seed)\n\n\n\n\n\n\n\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: int ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Optional[int] 
,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Tuple ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Optional[Any] ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Optional[Any])\t\t->\t\t\t\t\tUnion[str, Any]:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n _UpperCamelCase = TFTransfoXLModel(lowercase_)\n\n _UpperCamelCase ,\t\t_UpperCamelCase = model(lowercase_).to_tuple()\n\n _UpperCamelCase = {\"input_ids\": input_ids_a, \"mems\": mems_a}\n\n _UpperCamelCase ,\t\t_UpperCamelCase = model(lowercase_).to_tuple()\n\n self.parent.assertEqual(hidden_states_a.shape ,\t\t\t\t(self.batch_size, self.seq_length, self.hidden_size))\n self.parent.assertEqual(hidden_states_a.shape ,\t\t\t\t(self.batch_size, self.seq_length, self.hidden_size))\n self.parent.assertListEqual(\n [mem.shape for mem in mems_a] ,\t\t\t\t[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,\t\t\t\t)\n self.parent.assertListEqual(\n [mem.shape for mem in mems_a] ,\t\t\t\t[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,\t\t\t\t)\n\n\n\n\n\n\n\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Dict ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: str ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: str ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Dict ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[Any])\t\t->\t\t\t\t\tUnion[str, Any]:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n _UpperCamelCase = TFTransfoXLLMHeadModel(lowercase_)\n\n _UpperCamelCase ,\t\t_UpperCamelCase = model(lowercase_).to_tuple()\n\n _UpperCamelCase = {\"input_ids\": input_ids_a, \"labels\": lm_labels}\n _UpperCamelCase ,\t\t_UpperCamelCase = model(lowercase_).to_tuple()\n\n _UpperCamelCase ,\t\t_UpperCamelCase = model([input_ids_a, mems_a]).to_tuple()\n\n _UpperCamelCase = {\"input_ids\": input_ids_a, \"mems\": mems_a, \"labels\": lm_labels}\n\n _UpperCamelCase ,\t\t_UpperCamelCase = model(lowercase_).to_tuple()\n\n self.parent.assertEqual(lm_logits_a.shape ,\t\t\t\t(self.batch_size, self.seq_length, self.vocab_size))\n self.parent.assertListEqual(\n [mem.shape for mem in mems_a] ,\t\t\t\t[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,\t\t\t\t)\n\n self.parent.assertEqual(lm_logits_a.shape ,\t\t\t\t(self.batch_size, self.seq_length, self.vocab_size))\n self.parent.assertListEqual(\n [mem.shape for mem in mems_a] ,\t\t\t\t[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,\t\t\t\t)\n\n\n\n\n\n\n\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Optional[Any] ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[Any] ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: List[Any] ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Optional[Any] ,\t\t\t\tlowercase_\t\t\t\t\t\t\t: Dict)\t\t->\t\t\t\t\tstr:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n _UpperCamelCase = TFTransfoXLForSequenceClassification(lowercase_)\n _UpperCamelCase = model(lowercase_)\n self.parent.assertEqual(result.logits.shape ,\t\t\t\t(self.batch_size, self.num_labels))\n\n\n\n\n\n\n\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Dict)\t\t->\t\t\t\t\tList[Any]:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n _UpperCamelCase = self.prepare_config_and_inputs()\n ((_UpperCamelCase) ,\t\t(_UpperCamelCase) ,\t\t(_UpperCamelCase) ,\t\t(_UpperCamelCase)) = config_and_inputs\n _UpperCamelCase = {\"input_ids\": input_ids_a}\n return config, inputs_dict\n\n\n\n\n@require_tf\nclass \t\t\t_UpperCAmelCase\t( lowerCAmelCase,\tlowerCAmelCase,\tunittest.TestCase ):\n\n\n\n '''simple docstring'''\n\n\n\n\n __A\t 
= (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    __A = False
    __A = False
    __A = False
    __A = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
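                # Classes collected in `list_other_models_with_output_ebd` expose their
                # output embeddings as a Keras layer but no bias; every other class is
                # expected to report neither, as the checks below verify.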
_UpperCamelCase = model.get_bias()\n assert name is None\n else:\n _UpperCamelCase = model.get_output_embeddings()\n assert x is None\n _UpperCamelCase = model.get_bias()\n assert name is None\n\n\n\n\n\n\n\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Optional[int])\t\t->\t\t\t\t\tAny:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n pass\n\n\n\n\n\n\n\n @slow\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: List[str])\t\t->\t\t\t\t\tTuple:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n _UpperCamelCase = TFTransfoXLModel.from_pretrained(lowercase_)\n self.assertIsNotNone(lowercase_)\n\n\n\n\n\n\n\n @unittest.skip(reason=\"This model doesn't play well with fit() due to not returning a single loss.\")\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Union[str, Any])\t\t->\t\t\t\t\tTuple:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n pass\n\n\n\n\n@require_tf\nclass \t\t\t_UpperCAmelCase\t( unittest.TestCase ):\n\n\n\n '''simple docstring'''\n\n\n\n\n @unittest.skip(\"Skip test until #12651 is resolved.\")\n @slow\n def \t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t\t\t\t: Optional[Any])\t\t->\t\t\t\t\tDict:\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n\n _UpperCamelCase = TFTransfoXLLMHeadModel.from_pretrained(\"transfo-xl-wt103\")\n # fmt: off\n _UpperCamelCase = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] ,\t\t\t\tdtype=tf.intaa) # noqa: E231\n # fmt: on\n # In 1991 , the remains of Russian Tsar Nicholas II and his family\n # ( except for Alexei and Maria ) are discovered .\n # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the\n # remainder of the story . 1883 Western Siberia ,\n # a young Grigori Rasputin is asked by his father and a group of men to perform magic .\n # Rasputin has a vision and denounces one of the men as a horse thief . Although his\n # father initially slaps him for making such an accusation , Rasputin watches as the\n # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of\n # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,\n # with people , even a bishop , begging for his blessing . 
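        # (Illustrative sketch, not part of the test: the same check driven through a
        #  tokenizer round-trip. `TransfoXLTokenizer` is the matching tokenizer class;
        #  the prompt string is an assumption, and greedy decoding mirrors the
        #  deterministic generation used below.
        #
        #      from transformers import TransfoXLTokenizer
        #      tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
        #      prompt_ids = tokenizer(
        #          "In 1991 , the remains of Russian Tsar Nicholas II",
        #          return_tensors="tf",
        #      ).input_ids
        #      generated = model.generate(prompt_ids, max_length=200, do_sample=False)
        #      print(tokenizer.decode(generated[0]))
        #  )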
\n\n # fmt: off\n _UpperCamelCase = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231\n # fmt: on\n # In 1991, the remains of Russian Tsar Nicholas II and his family (\n # except for Alexei and Maria ) are discovered. The voice of young son,\n # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.\n # 1883 Western Siberia, a young Grigori Rasputin is asked by his father\n # and a group of men to perform magic. Rasputin has a vision and\n # denounces one of the men as a horse thief. Although his father initially\n # slaps him for making such an accusation, Rasputin watches as the man\n # is chased outside and beaten. Twenty years later, Rasputin sees a vision\n # of the Virgin Mary, prompting him to become a priest.\n # Rasputin quickly becomes famous, with people, even a bishop, begging for\n # his blessing. In the 1990s, the remains of Russian Tsar\n # Nicholas II and his family were discovered. The voice of young son,\n # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.\n\n _UpperCamelCase = model.generate(lowercase_ ,\t\t\t\tmax_length=200 ,\t\t\t\tdo_sample=lowercase_)\n self.assertListEqual(output_ids[0].numpy().tolist() ,\t\t\t\tlowercase_)\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":63,"string":"63"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":740,"cells":{"code":{"kind":"string","value":"\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\n\r\n\r\nlowercase_ \t\t= {\r\n \"google/tapas-base-finetuned-sqa\": (\r\n \"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json\"\r\n ),\r\n \"google/tapas-base-finetuned-wtq\": (\r\n \"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json\"\r\n ),\r\n \"google/tapas-base-finetuned-wikisql-supervised\": (\r\n \"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json\"\r\n ),\r\n \"google/tapas-base-finetuned-tabfact\": (\r\n \"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json\"\r\n ),\r\n}\r\n\r\n\r\nclass A\t\t\t\t\t\t(\t\t\t\t\t\t\t_UpperCAmelCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n lowerCamelCase\t\t\t=\t\t\t\t\t\t\t'tapas'\r\n\r\n\r\n def __init__( self : Optional[Any],lowercase_ : Union[str, Any]=3_0_5_2_2,lowercase_ : List[str]=7_6_8,lowercase_ : int=1_2,lowercase_ : Optional[Any]=1_2,lowercase_ : str=3_0_7_2,lowercase_ : Optional[Any]=\"gelu\",lowercase_ : Union[str, Any]=0.1,lowercase_ : str=0.1,lowercase_ : Optional[int]=1_0_2_4,lowercase_ : int=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0],lowercase_ : Any=0.02,lowercase_ : int=1E-12,lowercase_ : Union[str, Any]=0,lowercase_ : Dict=10.0,lowercase_ : Any=0,lowercase_ : Any=1.0,lowercase_ : Any=None,lowercase_ : Optional[Any]=1.0,lowercase_ : Tuple=False,lowercase_ : Any=None,lowercase_ : Optional[int]=1.0,lowercase_ : Dict=1.0,lowercase_ : 
Any=False,lowercase_ : Optional[int]=False,lowercase_ : Union[str, Any]=\"ratio\",lowercase_ : Tuple=None,lowercase_ : Tuple=None,lowercase_ : Tuple=6_4,lowercase_ : Optional[int]=3_2,lowercase_ : Dict=False,lowercase_ : List[Any]=True,lowercase_ : Optional[int]=False,lowercase_ : Optional[Any]=False,lowercase_ : List[Any]=True,lowercase_ : List[Any]=False,lowercase_ : int=None,lowercase_ : Optional[int]=None,**lowercase_ : int,)-> str:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n super().__init__(pad_token_id=lowercase_,**lowercase_\t\t)\r\n\r\n # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)\r\n A__ = vocab_size\r\n A__ = hidden_size\r\n A__ = num_hidden_layers\r\n A__ = num_attention_heads\r\n A__ = hidden_act\r\n A__ = intermediate_size\r\n A__ = hidden_dropout_prob\r\n A__ = attention_probs_dropout_prob\r\n A__ = max_position_embeddings\r\n A__ = type_vocab_sizes\r\n A__ = initializer_range\r\n A__ = layer_norm_eps\r\n\r\n # Fine-tuning task hyperparameters\r\n A__ = positive_label_weight\r\n A__ = num_aggregation_labels\r\n A__ = aggregation_loss_weight\r\n A__ = use_answer_as_supervision\r\n A__ = answer_loss_importance\r\n A__ = use_normalized_answer_loss\r\n A__ = huber_loss_delta\r\n A__ = temperature\r\n A__ = aggregation_temperature\r\n A__ = use_gumbel_for_cells\r\n A__ = use_gumbel_for_aggregation\r\n A__ = average_approximation_function\r\n A__ = cell_selection_preference\r\n A__ = answer_loss_cutoff\r\n A__ = max_num_rows\r\n A__ = max_num_columns\r\n A__ = average_logits_per_cell\r\n A__ = select_one_column\r\n A__ = allow_empty_column_selection\r\n A__ = init_cell_selection_weights_to_zero\r\n A__ = reset_position_index_per_cell\r\n A__ = disable_per_token_loss\r\n\r\n # Aggregation hyperparameters\r\n A__ = aggregation_labels\r\n A__ = no_aggregation_label_index\r\n\r\n if isinstance(self.aggregation_labels,lowercase_\t\t):\r\n A__ = {int(lowercase_\t\t): v for k, v in aggregation_labels.items()}\r\n\r\n"},"code_codestyle":{"kind":"number","value":7,"string":"7"},"style_context":{"kind":"string","value":"\r\r\rimport gc\rimport inspect\rimport unittest\r\rimport torch\rfrom parameterized import parameterized\r\rfrom diffusers import PriorTransformer\rfrom diffusers.utils import floats_tensor, slow, torch_all_close, torch_device\rfrom diffusers.utils.testing_utils import enable_full_determinism\r\rfrom .test_modeling_common import ModelTesterMixin\r\r\renable_full_determinism()\r\r\r\r\r\rclass A ( A_\t\t\t\t\t,\t\t\t\tunittest.TestCase ):\r\t\t\t\t\t\t\tUpperCamelCase_ :\t\t\t\t\t\t\tAny =PriorTransformer\r\t\t\t\t\t\t\tUpperCamelCase_ :\t\t\t\t\t\t\tList[str] ='''hidden_states'''\r\r\r\t\t\t\t\t\t\t@property\r\t\t\t\t\t\t\tdef \t\t\t_A (self\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\t4\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\t8\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\t7\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tfloats_tensor((batch_size, embedding_dim)\t\t\t\t\t).to(lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tfloats_tensor((batch_size, embedding_dim)\t\t\t\t\t).to(lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tfloats_tensor((batch_size, num_embeddings, embedding_dim)\t\t\t\t\t).to(lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\treturn {\r\t\t\t\t\t\t\t\t\t \"hidden_states\": hidden_states,\r\t\t\t\t\t\t\t\t\t \"timestep\": 2,\r\t\t\t\t\t\t\t\t\t \"proj_embedding\": proj_embedding,\r\t\t\t\t\t\t\t\t\t \"encoder_hidden_states\": 
encoder_hidden_states,\r\t\t\t\t\t\t\t\t\t}\r\r\r\t\t\t\t\t\t\tdef \t\t\t_A (self ,\t\t\t\tlowerCAmelCase=0\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\ttorch.manual_seed(lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\t4\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\t8\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\t7\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\ttorch.randn((batch_size, embedding_dim)\t\t\t\t\t).to(lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\ttorch.randn((batch_size, embedding_dim)\t\t\t\t\t).to(lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\ttorch.randn((batch_size, num_embeddings, embedding_dim)\t\t\t\t\t).to(lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\treturn {\r\t\t\t\t\t\t\t\t\t \"hidden_states\": hidden_states,\r\t\t\t\t\t\t\t\t\t \"timestep\": 2,\r\t\t\t\t\t\t\t\t\t \"proj_embedding\": proj_embedding,\r\t\t\t\t\t\t\t\t\t \"encoder_hidden_states\": encoder_hidden_states,\r\t\t\t\t\t\t\t\t\t}\r\r\r\t\t\t\t\t\t\t@property\r\t\t\t\t\t\t\tdef \t\t\t_A (self\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\treturn (4, 8)\r\r\r\t\t\t\t\t\t\t@property\r\t\t\t\t\t\t\tdef \t\t\t_A (self\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\treturn (4, 8)\r\r\r\t\t\t\t\t\t\tdef \t\t\t_A (self\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\t{\r\t\t\t\t\t\t\t\t\t 'num_attention_heads': 2,\r\t\t\t\t\t\t\t\t\t 'attention_head_dim': 4,\r\t\t\t\t\t\t\t\t\t 'num_layers': 2,\r\t\t\t\t\t\t\t\t\t 'embedding_dim': 8,\r\t\t\t\t\t\t\t\t\t 'num_embeddings': 7,\r\t\t\t\t\t\t\t\t\t 'additional_embeddings': 4,\r\t\t\t\t\t\t\t\t\t}\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tself.dummy_input\r\t\t\t\t\t\t\t\t\treturn init_dict, inputs_dict\r\r\r\t\t\t\t\t\t\tdef \t\t\t_A (self\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\t__lowercase,\t\t\t\t\t\t\t__lowercase=\t\t\tPriorTransformer.from_pretrained(\r\t\t\t\t\t\t\t\t\t 'hf-internal-testing/prior-dummy' ,\t\t\t\toutput_loading_info=lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\tself.assertIsNotNone(lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\tself.assertEqual(len(loading_info['missing_keys']\t\t\t\t\t) ,\t\t\t\t0\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\tmodel.to(lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tmodel(**self.dummy_input\t\t\t\t\t)[0]\r\r\t\t\t\t\t\t\t\t\tassert hidden_states is not None, \"Make sure output is not None\"\r\r\r\t\t\t\t\t\t\tdef \t\t\t_A (self\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\t__lowercase,\t\t\t\t\t\t\t__lowercase=\t\t\tself.prepare_init_args_and_inputs_for_common()\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tself.model_class(**lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tinspect.signature(model.forward\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t# signature.parameters is an OrderedDict => so arg_names order is deterministic\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\t[*signature.parameters.keys()]\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\t['hidden_states', 'timestep']\r\t\t\t\t\t\t\t\t\tself.assertListEqual(arg_names[:2] ,\t\t\t\tlowerCAmelCase\t\t\t\t\t)\r\r\r\r\r\r\r\r\t\t\t\t\t\t\tdef \t\t\t_A (self\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tPriorTransformer.from_pretrained('hf-internal-testing/prior-dummy'\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tmodel.to(lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\tif hasattr(lowerCAmelCase ,\t\t\t\t'set_default_attn_processor'\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\t\t\tmodel.set_default_attn_processor()\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tself.get_dummy_seed_input()\r\r\t\t\t\t\t\t\t\t\twith 
torch.no_grad():\r\t\t\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tmodel(**lowerCAmelCase\t\t\t\t\t)[0]\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\toutput[0, :5].flatten().cpu()\r\t\t\t\t\t\t\t\t\tprint(lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\t# Since the VAE Gaussian prior's generator is seeded on the appropriate device,\r\t\t\t\t\t\t\t\t\t# the expected output slices are not the same for CPU and GPU.\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\ttorch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39]\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\tself.assertTrue(torch_all_close(lowerCAmelCase ,\t\t\t\tlowerCAmelCase ,\t\t\t\trtol=1E-2\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r@slow\rclass A ( unittest.TestCase ):\r\r\r\t\t\t\t\t\t\tdef \t\t\t_A (self ,\t\t\t\tlowerCAmelCase=1 ,\t\t\t\tlowerCAmelCase=7_6_8 ,\t\t\t\tlowerCAmelCase=7_7 ,\t\t\t\tlowerCAmelCase=0\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\ttorch.manual_seed(lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tbatch_size\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tembedding_dim\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tnum_embeddings\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\ttorch.randn((batch_size, embedding_dim)\t\t\t\t\t).to(lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\ttorch.randn((batch_size, embedding_dim)\t\t\t\t\t).to(lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\ttorch.randn((batch_size, num_embeddings, embedding_dim)\t\t\t\t\t).to(lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\treturn {\r\t\t\t\t\t\t\t\t\t \"hidden_states\": hidden_states,\r\t\t\t\t\t\t\t\t\t \"timestep\": 2,\r\t\t\t\t\t\t\t\t\t \"proj_embedding\": proj_embedding,\r\t\t\t\t\t\t\t\t\t \"encoder_hidden_states\": encoder_hidden_states,\r\t\t\t\t\t\t\t\t\t}\r\r\r\t\t\t\t\t\t\tdef \t\t\t_A (self\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\t# clean up the VRAM after each test\r\t\t\t\t\t\t\t\t\tsuper().tearDown()\r\t\t\t\t\t\t\t\t\tgc.collect()\r\t\t\t\t\t\t\t\t\ttorch.cuda.empty_cache()\r\r\r\r\r\r\r\r\t\t\t\t\t\t\t@parameterized.expand(\r\t\t\t\t\t\t\t [\r\t\t\t\t\t\t\t # fmt: off\r\t\t\t\t\t\t\t [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],\r\t\t\t\t\t\t\t [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],\r\t\t\t\t\t\t\t # fmt: on\r\t\t\t\t\t\t\t ]\t\t\t\t\t)\r\t\t\t\t\t\t\tdef \t\t\t_A (self ,\t\t\t\tlowerCAmelCase ,\t\t\t\tlowerCAmelCase\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tPriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' ,\t\t\t\tsubfolder='prior'\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\tmodel.to(lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tself.get_dummy_seed_input(seed=lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\twith torch.no_grad():\r\t\t\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tmodel(**lowerCAmelCase\t\t\t\t\t)[0]\r\r\t\t\t\t\t\t\t\t\tassert list(sample.shape\t\t\t\t\t) == [1, 7_6_8]\r\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\tsample[0, :8].flatten().cpu()\r\t\t\t\t\t\t\t\t\tprint(lowerCAmelCase\t\t\t\t\t)\r\t\t\t\t\t\t\t\t\t__lowercase=\t\t\ttorch.tensor(lowerCAmelCase\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\tassert torch_all_close(lowerCAmelCase ,\t\t\t\tlowerCAmelCase ,\t\t\t\tatol=1E-3\t\t\t\t\t)\r\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":295,"string":"295"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":741,"cells":{"code":{"kind":"string","value":"\r\rfrom typing import Dict, List, Optional\r\rfrom ...tokenization_utils import AddedToken, PreTrainedTokenizer\rfrom ...utils import logging\r\r\rlowerCAmelCase\t\t\t\t\t\t\t: List[str] 
=\t\t\t\t\t\tlogging.get_logger(__name__)\r\r\rlowerCAmelCase\t\t\t\t\t\t\t: Tuple =\t\t\t\t\t\t{\r \"\"\"nielsr/canine-s\"\"\": 2048,\r}\r\r# Unicode defines 1,114,112 total “codepoints”\rlowerCAmelCase\t\t\t\t\t\t\t: Any =\t\t\t\t\t\t1114112\r\r# Below: Constants defining canonical codepoints for special, pseudo-characters.\r# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py\rlowerCAmelCase\t\t\t\t\t\t\t: Dict =\t\t\t\t\t\t0\r\rlowerCAmelCase\t\t\t\t\t\t\t: Dict =\t\t\t\t\t\t0xe000\rlowerCAmelCase\t\t\t\t\t\t\t: str =\t\t\t\t\t\t0xe001\rlowerCAmelCase\t\t\t\t\t\t\t: str =\t\t\t\t\t\t0xe002\rlowerCAmelCase\t\t\t\t\t\t\t: Optional[int] =\t\t\t\t\t\t0xe003\rlowerCAmelCase\t\t\t\t\t\t\t: List[Any] =\t\t\t\t\t\t0xe004\r\r# Maps special codepoints to human-readable names.\rlowerCAmelCase\t\t\t\t\t\t\t: Dict[int, str] =\t\t\t\t\t\t{\r # Special symbols are represented using codepoints values that are valid,\r # but designated as \"Private Use\", meaning that they will never be assigned\r # characters by the Unicode Consortium, and are thus safe for use here.\r #\r # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly\r # excluded and should fail with a hard error.\r CLS: \"[CLS]\",\r SEP: \"[SEP]\",\r BOS: \"[BOS]\",\r MASK: \"[MASK]\",\r PAD: \"[PAD]\",\r RESERVED: \"[RESERVED]\",\r}\r\r# Maps special codepoint human-readable names to their codepoint values.\rlowerCAmelCase\t\t\t\t\t\t\t: Dict[str, int] =\t\t\t\t\t\t{name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}\r\r\r\rclass __lowercase (\t\t\tUpperCAmelCase_ ):\r\r\r\r\r\t\t\t\"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\r\t\t\t_UpperCAmelCase :\t\t\t\t\t\t\tList[str] \t\t\t\t\t\t\t=\t\t\t\t\tPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\r\r\r\r\t\t\tdef __init__(\t\t\tself :\t\t\t\t\tOptional[Any] ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tTuple=chr(lowerCAmelCase__) ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tTuple=chr(lowerCAmelCase__) ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tstr=chr(lowerCAmelCase__) ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tList[Any]=chr(lowerCAmelCase__) ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tOptional[int]=chr(lowerCAmelCase__) ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tUnion[str, Any]=chr(lowerCAmelCase__) ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tUnion[str, Any]=False ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tint=2048 ,\t\t\t\t\t\t**lowerCAmelCase__ :\t\t\t\t\tTuple ,\t\t\t\t\t\t):\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tUnion[str, Any] \t\t=\tAddedToken(lowerCAmelCase__ ,\t\t\t\t\t\tlstrip=lowerCAmelCase__ ,\t\t\t\t\t\trstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ ,\t\t\t\t\t\tlowerCAmelCase__) else bos_token\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tList[Any] \t\t=\tAddedToken(lowerCAmelCase__ ,\t\t\t\t\t\tlstrip=lowerCAmelCase__ ,\t\t\t\t\t\trstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ ,\t\t\t\t\t\tlowerCAmelCase__) else eos_token\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tList[Any] \t\t=\tAddedToken(lowerCAmelCase__ ,\t\t\t\t\t\tlstrip=lowerCAmelCase__ ,\t\t\t\t\t\trstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ ,\t\t\t\t\t\tlowerCAmelCase__) else sep_token\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tAny \t\t=\tAddedToken(lowerCAmelCase__ ,\t\t\t\t\t\tlstrip=lowerCAmelCase__ ,\t\t\t\t\t\trstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ ,\t\t\t\t\t\tlowerCAmelCase__) else cls_token\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tstr \t\t=\tAddedToken(lowerCAmelCase__ 
,\t\t\t\t\t\tlstrip=lowerCAmelCase__ ,\t\t\t\t\t\trstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ ,\t\t\t\t\t\tlowerCAmelCase__) else pad_token\r\r\t\t\t\t\t\t\t\t\t# Mask token behave like a normal word, i.e. include the space before it\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tDict \t\t=\tAddedToken(lowerCAmelCase__ ,\t\t\t\t\t\tlstrip=lowerCAmelCase__ ,\t\t\t\t\t\trstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ ,\t\t\t\t\t\tlowerCAmelCase__) else mask_token\r\r\t\t\t\t\t\t\t\t\tsuper().__init__(\r\t\t\t\t\t\t\t\t\t bos_token=lowerCAmelCase__ ,\t\t\t\t\t\teos_token=lowerCAmelCase__ ,\t\t\t\t\t\tsep_token=lowerCAmelCase__ ,\t\t\t\t\t\tcls_token=lowerCAmelCase__ ,\t\t\t\t\t\tpad_token=lowerCAmelCase__ ,\t\t\t\t\t\tmask_token=lowerCAmelCase__ ,\t\t\t\t\t\tadd_prefix_space=lowerCAmelCase__ ,\t\t\t\t\t\tmodel_max_length=lowerCAmelCase__ ,\t\t\t\t\t\t**lowerCAmelCase__ ,\t\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t\t# Creates a mapping for looking up the IDs of special symbols.\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tDict[str, int] \t\t=\t{}\r\t\t\t\t\t\t\t\t\tfor codepoint, name in SPECIAL_CODEPOINTS.items():\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tList[Any] \t\t=\tcodepoint\r\r\t\t\t\t\t\t\t\t\t# Creates a mapping for looking up the string forms of special symbol IDs.\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tDict[int, str] \t\t=\t{\r\t\t\t\t\t\t\t\t\t codepoint: name for name, codepoint in self._special_codepoints.items()\r\t\t\t\t\t\t\t\t\t}\r\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tList[str] \t\t=\tUNICODE_VOCAB_SIZE\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tTuple \t\t=\tlen(self._special_codepoints)\r\r\r\r\t\t\t@property\r\t\t\tdef \t_SCREAMING_SNAKE_CASE (\t\t\tself :\t\t\t\t\tOptional[int]):\r\t\t\t\t\t\t\t\t\treturn self._unicode_vocab_size\r\r\r\r\t\t\tdef \t_SCREAMING_SNAKE_CASE (\t\t\tself :\t\t\t\t\tDict ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tstr):\r\t\t\t\t\t\t\t\t\treturn list(lowerCAmelCase__)\r\r\r\r\t\t\tdef \t_SCREAMING_SNAKE_CASE (\t\t\tself :\t\t\t\t\tList[str] ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tstr):\r\t\t\t\t\t\t\t\t\ttry:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn ord(lowerCAmelCase__)\r\t\t\t\t\t\t\t\t\texcept TypeError:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(F\"invalid token: '{token}'\")\r\r\r\r\t\t\tdef \t_SCREAMING_SNAKE_CASE (\t\t\tself :\t\t\t\t\tOptional[int] ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tint):\r\t\t\t\t\t\t\t\t\ttry:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif index in SPECIAL_CODEPOINTS:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn SPECIAL_CODEPOINTS[index]\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn chr(lowerCAmelCase__)\r\t\t\t\t\t\t\t\t\texcept TypeError:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(F\"invalid id: {index}\")\r\r\r\r\t\t\tdef \t_SCREAMING_SNAKE_CASE (\t\t\tself :\t\t\t\t\tList[str] ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tDict):\r\t\t\t\t\t\t\t\t\treturn \"\".join(lowerCAmelCase__)\r\r\r\r\t\t\tdef \t_SCREAMING_SNAKE_CASE (\t\t\tself :\t\t\t\t\tOptional[int] ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tList[int] ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tOptional[List[int]] = None):\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tList[str] \t\t=\t[self.sep_token_id]\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tTuple \t\t=\t[self.cls_token_id]\r\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tint \t\t=\tcls + token_ids_a + sep\r\t\t\t\t\t\t\t\t\tif token_ids_a is not None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult += token_ids_a + sep\r\t\t\t\t\t\t\t\t\treturn result\r\r\r\r\t\t\tdef 
\t_SCREAMING_SNAKE_CASE (\t\t\tself :\t\t\t\t\tUnion[str, Any] ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tList[int] ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tOptional[List[int]] = None ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tbool = False):\r\t\t\t\t\t\t\t\t\tif already_has_special_tokens:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn super().get_special_tokens_mask(\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t token_ids_a=lowerCAmelCase__ ,\t\t\t\t\t\ttoken_ids_a=lowerCAmelCase__ ,\t\t\t\t\t\talready_has_special_tokens=lowerCAmelCase__)\r\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tAny \t\t=\t[1] + ([0] * len(lowerCAmelCase__)) + [1]\r\t\t\t\t\t\t\t\t\tif token_ids_a is not None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult += ([0] * len(lowerCAmelCase__)) + [1]\r\t\t\t\t\t\t\t\t\treturn result\r\r\r\r\t\t\tdef \t_SCREAMING_SNAKE_CASE (\t\t\tself :\t\t\t\t\tDict ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tList[int] ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tOptional[List[int]] = None):\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tDict \t\t=\t[self.sep_token_id]\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tint \t\t=\t[self.cls_token_id]\r\r\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE_:\t\t\tList[str] \t\t=\tlen(cls + token_ids_a + sep) * [0]\r\t\t\t\t\t\t\t\t\tif token_ids_a is not None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult += len(token_ids_a + sep) * [1]\r\t\t\t\t\t\t\t\t\treturn result\r\r\r\r\r\t\t\tdef \t_SCREAMING_SNAKE_CASE (\t\t\tself :\t\t\t\t\tstr ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tstr ,\t\t\t\t\t\tlowerCAmelCase__ :\t\t\t\t\tOptional[str] = None):\r\t\t\t\t\t\t\t\t\treturn ()\r\r\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":127,"string":"127"},"style_context":{"kind":"string","value":"\r\rimport torch\r\rfrom diffusers import StableDiffusionPipeline\r\r\rlowerCAmelCase\t\t\t\t\t\t\t: Any =\t\t\t\t\t\t\"\"\"path-to-your-trained-model\"\"\"\rlowerCAmelCase\t\t\t\t\t\t\t: int =\t\t\t\t\t\tStableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to(\"\"\"cuda\"\"\")\r\rlowerCAmelCase\t\t\t\t\t\t\t: Union[str, Any] =\t\t\t\t\t\t\"\"\"A photo of sks dog in a bucket\"\"\"\rlowerCAmelCase\t\t\t\t\t\t\t: Any =\t\t\t\t\t\tpipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]\r\rimage.save(\"\"\"dog-bucket.png\"\"\")\r\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":127,"string":"127"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":742,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport time\r\nfrom contextlib import contextmanager\r\nfrom pathlib import Path\r\n\r\nimport pytest\r\nimport requests\r\nfrom huggingface_hub.hf_api import HfApi, HfFolder\r\n\r\n\r\n_a \t= '''__DUMMY_TRANSFORMERS_USER__'''\r\n_a \t= '''Dummy User'''\r\n_a \t= '''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'''\r\n\r\n_a \t= '''https://hub-ci.huggingface.co'''\r\n_a \t= CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''\r\n_a \t= CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''\r\n_a \t= Path('''~/.huggingface/hub_ci_token''').expanduser()\r\n\r\n\r\n@pytest.fixture\r\ndef __A ( __lowerCAmelCase\t)-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\tmonkeypatch.setattr(\r\n\t\t\t\t\t\t 'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' ,\t\t__lowerCAmelCase\t)\r\n\r\n\r\n@pytest.fixture\r\ndef __A ( __lowerCAmelCase\t)-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple 
docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\tmonkeypatch.setattr('datasets.config.HF_ENDPOINT' ,\t\t__lowerCAmelCase\t)\r\n\t\t\t\t\t\tmonkeypatch.setattr('datasets.config.HUB_DATASETS_URL' ,\t\t__lowerCAmelCase\t)\r\n\r\n\r\n@pytest.fixture\r\ndef __A ( __lowerCAmelCase\t)-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\tmonkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' ,\t\t__lowerCAmelCase\t)\r\n\r\n\r\n@pytest.fixture\r\ndef __A ( __lowerCAmelCase ,\t\t__lowerCAmelCase\t)-> Tuple:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\tHfFolder.save_token(__lowerCAmelCase\t)\r\n\t\t\t\t\t\tyield\r\n\t\t\t\t\t\tHfFolder.delete_token()\r\n\r\n\r\n@pytest.fixture(scope='session'\t)\r\ndef __A ( )-> Tuple:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\treturn HfApi(endpoint=__lowerCAmelCase\t)\r\n\r\n\r\n@pytest.fixture(scope='session'\t)\r\ndef __A ( __lowerCAmelCase\t)-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\t_UpperCAmelCase\t\t\t\t\t\t\t\t\t=\t\t\tHfFolder.get_token()\r\n\t\t\t\t\t\tHfFolder.save_token(__lowerCAmelCase\t)\r\n\t\t\t\t\t\tyield CI_HUB_USER_TOKEN\r\n\t\t\t\t\t\tif previous_token is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tHfFolder.save_token(__lowerCAmelCase\t)\r\n\r\n\r\n@pytest.fixture\r\ndef __A ( __lowerCAmelCase\t)-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\tdef _cleanup_repo(__lowerCAmelCase\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\thf_api.delete_repo(__lowerCAmelCase ,\t\ttoken=__lowerCAmelCase ,\t\trepo_type='dataset'\t)\r\n\r\n\t\t\t\t\t\treturn _cleanup_repo\r\n\r\n\r\n@pytest.fixture\r\ndef __A ( __lowerCAmelCase\t)-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\t@contextmanager\r\n\t\t\t\t\t\tdef _temporary_repo(__lowerCAmelCase\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tyield repo_id\r\n\t\t\t\t\t\t\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcleanup_repo(__lowerCAmelCase\t)\r\n\r\n\t\t\t\t\t\treturn _temporary_repo\r\n\r\n\r\n@pytest.fixture(scope='session'\t)\r\ndef __A ( __lowerCAmelCase ,\t\t__lowerCAmelCase ,\t\t__lowerCAmelCase\t)-> Dict:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\t_UpperCAmelCase\t\t\t\t\t\t\t\t\t=\t\t\tF\"\"\"repo_txt_data-{int(time.time() * 10E3\t)}\"\"\"\r\n\t\t\t\t\t\t_UpperCAmelCase\t\t\t\t\t\t\t\t\t=\t\t\tF\"\"\"{CI_HUB_USER}/{repo_name}\"\"\"\r\n\t\t\t\t\t\thf_api.create_repo(__lowerCAmelCase ,\t\ttoken=__lowerCAmelCase ,\t\trepo_type='dataset' ,\t\tprivate=__lowerCAmelCase\t)\r\n\t\t\t\t\t\thf_api.upload_file(\r\n\t\t\t\t\t\t token=__lowerCAmelCase ,\t\tpath_or_fileobj=str(__lowerCAmelCase\t) ,\t\tpath_in_repo='data/text_data.txt' ,\t\trepo_id=__lowerCAmelCase ,\t\trepo_type='dataset' ,\t\t)\r\n\t\t\t\t\t\tyield repo_id\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\thf_api.delete_repo(__lowerCAmelCase ,\t\ttoken=__lowerCAmelCase ,\t\trepo_type='dataset'\t)\r\n\t\t\t\t\t\texcept (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error\r\n\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\r\n\r\n@pytest.fixture()\r\ndef __A ( __lowerCAmelCase ,\t\t__lowerCAmelCase ,\t\t__lowerCAmelCase\t)-> List[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t\t\treturn 
# NB: zip_csv_with_dir_path / zip_image_path below are data-file fixtures defined
# elsewhere in the test suite; the names are assumed, as the dump did not preserve them.
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
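# A minimal usage sketch (a hypothetical test, not part of this fixtures module): the
# wrapper fixtures above already pull in the CI endpoint patches, so a test only needs
# to request the repo id and a token. HfApi.dataset_info is the standard huggingface_hub
# call for checking that a repo exists.
#
# def test_private_text_repo_is_reachable(hf_private_dataset_repo_txt_data, hf_api, hf_token):
#     info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
#     assert info.id == hf_private_dataset_repo_txt_data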
# --- next code sample ---

# Rod-cutting (CLRS): maximum revenue obtainable from cutting a rod of length n,
# given a list of prices for each piece length.


def naive_cut_rod_recursive(n: int, prices: list):
    # Exponential-time recursion: try every possible first-cut length i.
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    # Memoized (top-down dynamic programming) wrapper around the recursion.
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # The best revenue comes from cutting the rod into 6 pieces, each
    # of length 1, resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()


# --- next code sample ---

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self : str, UpperCamelCase : int=True, UpperCamelCase : Union[str, Any]=None, UpperCamelCase : List[Any]=3, UpperCamelCase 
:\t\t\tint=300\t\t\t,\tUpperCamelCase :\t\t\tList[Any]=1_024\t\t\t,\tUpperCamelCase :\t\t\tTuple=6\t\t\t,\tUpperCamelCase :\t\t\tOptional[Any]=1_024\t\t\t,\tUpperCamelCase :\t\t\tstr=8\t\t\t,\tUpperCamelCase :\t\t\tTuple=6\t\t\t,\tUpperCamelCase :\t\t\tOptional[Any]=1_024\t\t\t,\tUpperCamelCase :\t\t\tint=8\t\t\t,\tUpperCamelCase :\t\t\tUnion[str, Any]=0.0\t\t\t,\tUpperCamelCase :\t\t\tAny=True\t\t\t,\tUpperCamelCase :\t\t\tint=\"relu\"\t\t\t,\tUpperCamelCase :\t\t\tTuple=256\t\t\t,\tUpperCamelCase :\t\t\tTuple=0.1\t\t\t,\tUpperCamelCase :\t\t\tUnion[str, Any]=0.0\t\t\t,\tUpperCamelCase :\t\t\tstr=0.0\t\t\t,\tUpperCamelCase :\t\t\tAny=0.02\t\t\t,\tUpperCamelCase :\t\t\tAny=1.0\t\t\t,\tUpperCamelCase :\t\t\tUnion[str, Any]=True\t\t\t,\tUpperCamelCase :\t\t\tDict=False\t\t\t,\tUpperCamelCase :\t\t\tTuple=\"sine\"\t\t\t,\tUpperCamelCase :\t\t\tUnion[str, Any]=\"resnet50\"\t\t\t,\tUpperCamelCase :\t\t\tOptional[Any]=True\t\t\t,\tUpperCamelCase :\t\t\tOptional[int]=False\t\t\t,\tUpperCamelCase :\t\t\tint=4\t\t\t,\tUpperCamelCase :\t\t\tUnion[str, Any]=4\t\t\t,\tUpperCamelCase :\t\t\tint=4\t\t\t,\tUpperCamelCase :\t\t\tUnion[str, Any]=False\t\t\t,\tUpperCamelCase :\t\t\tUnion[str, Any]=300\t\t\t,\tUpperCamelCase :\t\t\tDict=False\t\t\t,\tUpperCamelCase :\t\t\tint=1\t\t\t,\tUpperCamelCase :\t\t\tOptional[Any]=5\t\t\t,\tUpperCamelCase :\t\t\tOptional[Any]=2\t\t\t,\tUpperCamelCase :\t\t\tDict=1\t\t\t,\tUpperCamelCase :\t\t\tAny=1\t\t\t,\tUpperCamelCase :\t\t\tOptional[int]=5\t\t\t,\tUpperCamelCase :\t\t\tOptional[Any]=2\t\t\t,\tUpperCamelCase :\t\t\tList[Any]=0.1\t\t\t,\tUpperCamelCase :\t\t\tstr=0.25\t\t\t,\tUpperCamelCase :\t\t\tint=False\t\t\t,\t**UpperCamelCase :\t\t\tint\t\t\t,\t):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\tif backbone_config is not None and use_timm_backbone:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"You can't specify both `backbone_config` and `use_timm_backbone`.\"\"\" )\n\n\t\t\t\t\t\t\t\t\t\t\t\tif not use_timm_backbone:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif backbone_config is None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.\"\"\" )\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t\t\t\t= CONFIG_MAPPING[\"\"\"resnet\"\"\"](out_features=[\"\"\"stage4\"\"\"] )\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif isinstance(UpperCamelCase\t\t\t,\tUpperCamelCase ):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t\t\t\t\t= backbone_config.get(\"\"\"model_type\"\"\" )\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t= CONFIG_MAPPING[backbone_model_type]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= config_class.from_dict(UpperCamelCase )\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= use_timm_backbone\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tDict\t\t\t\t\t\t\t\t\t\t\t= backbone_config\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t= num_channels\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= num_queries\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= max_position_embeddings\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t= d_model\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= encoder_ffn_dim\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t= encoder_layers\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t\t\t\t\t= encoder_attention_heads\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t= decoder_ffn_dim\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t\t\t\t= decoder_layers\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t= decoder_attention_heads\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t= dropout\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= attention_dropout\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= activation_dropout\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= activation_function\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= init_std\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= init_xavier_std\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= encoder_layerdrop\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t= auxiliary_loss\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t= position_embedding_type\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t= backbone\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= use_pretrained_backbone\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= dilation\n\t\t\t\t\t\t\t\t\t\t\t\t# deformable attributes\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tDict\t\t\t\t\t\t\t\t\t\t\t= num_feature_levels\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= 
encoder_n_points\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= decoder_n_points\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tDict\t\t\t\t\t\t\t\t\t\t\t= two_stage\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= two_stage_num_proposals\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= with_box_refine\n\t\t\t\t\t\t\t\t\t\t\t\tif two_stage is True and with_box_refine is False:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"If two_stage is True, with_box_refine must be True.\"\"\" )\n\t\t\t\t\t\t\t\t\t\t\t\t# Hungarian matcher\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= class_cost\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= bbox_cost\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= giou_cost\n\t\t\t\t\t\t\t\t\t\t\t\t# Loss coefficients\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= mask_loss_coefficient\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t\t\t\t= dice_loss_coefficient\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t= bbox_loss_coefficient\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tDict\t\t\t\t\t\t\t\t\t\t\t= giou_loss_coefficient\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= eos_coefficient\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= focal_alpha\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= disable_custom_kernels\n\t\t\t\t\t\t\t\t\t\t\t\tsuper().__init__(is_encoder_decoder=UpperCamelCase\t\t\t,\t**UpperCamelCase )\n\n\n\n\n\n\n\t\t\t\t\t@property\n\t\t\t\t\tdef lowerCamelCase__\t\t\t\t\t\t\t(\tself :\t\t\tDict ):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\treturn self.encoder_attention_heads\n\n\n\n\n\n\n\t\t\t\t\t@property\n\t\t\t\t\tdef lowerCamelCase__\t\t\t\t\t\t\t(\tself :\t\t\tList[Any] ):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\treturn self.d_model\n\n\n\n\n\n\n\t\t\t\t\tdef lowerCamelCase__\t\t\t\t\t\t\t(\tself :\t\t\tint ):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t\t\t\t= copy.deepcopy(self.__dict__ )\n\t\t\t\t\t\t\t\t\t\t\t\tif self.backbone_config is not None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t\t\t\t\t= self.backbone_config.to_dict()\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t\t\t\t= self.__class__.model_type\n\t\t\t\t\t\t\t\t\t\t\t\treturn output\n"},"code_codestyle":{"kind":"number","value":320,"string":"320"},"style_context":{"kind":"string","value":"\n\n\n\"\"\"simple docstring\"\"\"\n\n\n\n\n\n\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\n\nfrom ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict\nfrom ...image_transforms import (\n center_crop,\n convert_to_rgb,\n get_resize_output_image_size,\n normalize,\n rescale,\n resize,\n to_channel_dimension_format,\n)\nfrom ...image_utils import (\n OPENAI_CLIP_MEAN,\n OPENAI_CLIP_STD,\n ChannelDimension,\n ImageInput,\n PILImageResampling,\n make_list_of_images,\n to_numpy_array,\n 
valid_images,\n)\nfrom ...utils import TensorType, is_vision_available, logging\n\n\nUpperCAmelCase :\t\t\t\t\tOptional[int] \t\t\t\t\t\t=\t\tlogging.get_logger(__name__)\n\n\nif is_vision_available():\n\t\t\t\t\t\t\timport PIL\n\n\n\n\n\nclass \t\t\t\t\tlowerCamelCase__ (\t\t\t\t\tA ):\n\n\n\n\n\n\n\n\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\t\t\t\t\t__a \t=\t\t\t\t\t\t\t[\"\"\"pixel_values\"\"\"]\n\n\n\n\n\n\n\t\t\t\t\tdef __init__(\tself :\t\t\tTuple\t\t\t,\tUpperCamelCase :\t\t\tbool = True\t\t\t,\tUpperCamelCase :\t\t\tDict[str, int] = None\t\t\t,\tUpperCamelCase :\t\t\tPILImageResampling = PILImageResampling.BICUBIC\t\t\t,\tUpperCamelCase :\t\t\tbool = True\t\t\t,\tUpperCamelCase :\t\t\tDict[str, int] = None\t\t\t,\tUpperCamelCase :\t\t\tbool = True\t\t\t,\tUpperCamelCase :\t\t\tUnion[int, float] = 1 / 255\t\t\t,\tUpperCamelCase :\t\t\tbool = True\t\t\t,\tUpperCamelCase :\t\t\tOptional[Union[float, List[float]]] = None\t\t\t,\tUpperCamelCase :\t\t\tOptional[Union[float, List[float]]] = None\t\t\t,\tUpperCamelCase :\t\t\tbool = True\t\t\t,\t**UpperCamelCase :\t\t\tstr\t\t\t,\t):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\tsuper().__init__(**UpperCamelCase )\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t= size if size is not None else {\"\"\"shortest_edge\"\"\": 224}\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= get_size_dict(UpperCamelCase\t\t\t,\tdefault_to_square=UpperCamelCase )\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= crop_size if crop_size is not None else {\"\"\"height\"\"\": 224, \"\"\"width\"\"\": 224}\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= get_size_dict(UpperCamelCase\t\t\t,\tdefault_to_square=UpperCamelCase\t\t\t,\tparam_name=\"\"\"crop_size\"\"\" )\n\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= do_resize\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t= size\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= resample\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= do_center_crop\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= crop_size\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t\t\t\t= do_rescale\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t= rescale_factor\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t= do_normalize\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= image_mean if image_mean is not None else OPENAI_CLIP_MEAN\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t= image_std if image_std is not None else OPENAI_CLIP_STD\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t= do_convert_rgb\n\n\n\n\n\n\n\t\t\t\t\tdef lowerCamelCase__\t\t\t\t\t\t\t(\tself :\t\t\tList[Any]\t\t\t,\tUpperCamelCase :\t\t\tnp.ndarray\t\t\t,\tUpperCamelCase :\t\t\tDict[str, int]\t\t\t,\tUpperCamelCase :\t\t\tPILImageResampling = PILImageResampling.BICUBIC\t\t\t,\tUpperCamelCase :\t\t\tOptional[Union[str, ChannelDimension]] = None\t\t\t,\t**UpperCamelCase :\t\t\tList[Any]\t\t\t,\t):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase 
:\t\t\t\t\t\tDict\t\t\t\t\t\t\t\t\t\t\t= get_size_dict(UpperCamelCase\t\t\t,\tdefault_to_square=UpperCamelCase )\n\t\t\t\t\t\t\t\t\t\t\t\tif \"shortest_edge\" not in size:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= get_resize_output_image_size(UpperCamelCase\t\t\t,\tsize=size[\"\"\"shortest_edge\"\"\"]\t\t\t,\tdefault_to_square=UpperCamelCase )\n\t\t\t\t\t\t\t\t\t\t\t\treturn resize(UpperCamelCase\t\t\t,\tsize=UpperCamelCase\t\t\t,\tresample=UpperCamelCase\t\t\t,\tdata_format=UpperCamelCase\t\t\t,\t**UpperCamelCase )\n\n\n\n\n\n\n\t\t\t\t\tdef lowerCamelCase__\t\t\t\t\t\t\t(\tself :\t\t\tList[Any]\t\t\t,\tUpperCamelCase :\t\t\tnp.ndarray\t\t\t,\tUpperCamelCase :\t\t\tDict[str, int]\t\t\t,\tUpperCamelCase :\t\t\tOptional[Union[str, ChannelDimension]] = None\t\t\t,\t**UpperCamelCase :\t\t\tDict\t\t\t,\t):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t\t\t\t= get_size_dict(UpperCamelCase )\n\t\t\t\t\t\t\t\t\t\t\t\tif \"height\" not in size or \"width\" not in size:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )\n\t\t\t\t\t\t\t\t\t\t\t\treturn center_crop(UpperCamelCase\t\t\t,\tsize=(size[\"\"\"height\"\"\"], size[\"\"\"width\"\"\"])\t\t\t,\tdata_format=UpperCamelCase\t\t\t,\t**UpperCamelCase )\n\n\n\n\n\n\n\t\t\t\t\tdef lowerCamelCase__\t\t\t\t\t\t\t(\tself :\t\t\tAny\t\t\t,\tUpperCamelCase :\t\t\tnp.ndarray\t\t\t,\tUpperCamelCase :\t\t\tUnion[int, float]\t\t\t,\tUpperCamelCase :\t\t\tOptional[Union[str, ChannelDimension]] = None\t\t\t,\t**UpperCamelCase :\t\t\tAny\t\t\t,\t):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\treturn rescale(UpperCamelCase\t\t\t,\tscale=UpperCamelCase\t\t\t,\tdata_format=UpperCamelCase\t\t\t,\t**UpperCamelCase )\n\n\n\n\n\n\n\t\t\t\t\tdef lowerCamelCase__\t\t\t\t\t\t\t(\tself :\t\t\tAny\t\t\t,\tUpperCamelCase :\t\t\tnp.ndarray\t\t\t,\tUpperCamelCase :\t\t\tUnion[float, List[float]]\t\t\t,\tUpperCamelCase :\t\t\tUnion[float, List[float]]\t\t\t,\tUpperCamelCase :\t\t\tOptional[Union[str, ChannelDimension]] = None\t\t\t,\t**UpperCamelCase :\t\t\tAny\t\t\t,\t):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\treturn normalize(UpperCamelCase\t\t\t,\tmean=UpperCamelCase\t\t\t,\tstd=UpperCamelCase\t\t\t,\tdata_format=UpperCamelCase\t\t\t,\t**UpperCamelCase )\n\n\n\n\n\n\n\t\t\t\t\tdef lowerCamelCase__\t\t\t\t\t\t\t(\tself :\t\t\tTuple\t\t\t,\tUpperCamelCase :\t\t\tImageInput\t\t\t,\tUpperCamelCase :\t\t\tbool = None\t\t\t,\tUpperCamelCase :\t\t\tDict[str, int] = None\t\t\t,\tUpperCamelCase :\t\t\tPILImageResampling = None\t\t\t,\tUpperCamelCase :\t\t\tbool = None\t\t\t,\tUpperCamelCase :\t\t\tint = None\t\t\t,\tUpperCamelCase :\t\t\tbool = None\t\t\t,\tUpperCamelCase :\t\t\tfloat = None\t\t\t,\tUpperCamelCase :\t\t\tbool = None\t\t\t,\tUpperCamelCase :\t\t\tOptional[Union[float, List[float]]] = None\t\t\t,\tUpperCamelCase :\t\t\tOptional[Union[float, List[float]]] = None\t\t\t,\tUpperCamelCase :\t\t\tbool = None\t\t\t,\tUpperCamelCase :\t\t\tOptional[Union[str, TensorType]] = None\t\t\t,\tUpperCamelCase :\t\t\tOptional[ChannelDimension] = ChannelDimension.FIRST\t\t\t,\t**UpperCamelCase 
:\t\t\tAny\t\t\t,\t):\n\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= do_resize if do_resize is not None else self.do_resize\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tDict\t\t\t\t\t\t\t\t\t\t\t= size if size is not None else self.size\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= get_size_dict(UpperCamelCase\t\t\t,\tparam_name=\"\"\"size\"\"\"\t\t\t,\tdefault_to_square=UpperCamelCase )\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tDict\t\t\t\t\t\t\t\t\t\t\t= resample if resample is not None else self.resample\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t= do_center_crop if do_center_crop is not None else self.do_center_crop\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t= crop_size if crop_size is not None else self.crop_size\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tDict\t\t\t\t\t\t\t\t\t\t\t= get_size_dict(UpperCamelCase\t\t\t,\tparam_name=\"\"\"crop_size\"\"\"\t\t\t,\tdefault_to_square=UpperCamelCase )\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t= do_rescale if do_rescale is not None else self.do_rescale\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t= rescale_factor if rescale_factor is not None else self.rescale_factor\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= do_normalize if do_normalize is not None else self.do_normalize\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= image_mean if image_mean is not None else self.image_mean\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= image_std if image_std is not None else self.image_std\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t\t\t\t\t= do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb\n\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t\t\t\t\t= make_list_of_images(UpperCamelCase )\n\n\t\t\t\t\t\t\t\t\t\t\t\tif not valid_images(UpperCamelCase ):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, \"\"\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"torch.Tensor, tf.Tensor or jax.ndarray.\"\"\" )\n\n\t\t\t\t\t\t\t\t\t\t\t\tif do_resize and size is None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"Size must be specified if do_resize is True.\"\"\" )\n\n\t\t\t\t\t\t\t\t\t\t\t\tif do_center_crop and crop_size is None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"Crop size must be specified if do_center_crop is True.\"\"\" )\n\n\t\t\t\t\t\t\t\t\t\t\t\tif do_rescale and rescale_factor is None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"Rescale factor must be specified if do_rescale is True.\"\"\" )\n\n\t\t\t\t\t\t\t\t\t\t\t\tif do_normalize and (image_mean is None or image_std is None):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"Image mean and std must be specified if do_normalize is True.\"\"\" )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# PIL RGBA images are converted to RGB\n\t\t\t\t\t\t\t\t\t\t\t\tif do_convert_rgb:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= [convert_to_rgb(UpperCamelCase ) for image in images]\n\n\t\t\t\t\t\t\t\t\t\t\t\t# All transformations expect numpy arrays.\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t= [to_numpy_array(UpperCamelCase ) for image in images]\n\n\t\t\t\t\t\t\t\t\t\t\t\tif do_resize:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t\t\t\t= [self.resize(image=UpperCamelCase\t\t\t,\tsize=UpperCamelCase\t\t\t,\tresample=UpperCamelCase ) for image in images]\n\n\t\t\t\t\t\t\t\t\t\t\t\tif do_center_crop:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t= [self.center_crop(image=UpperCamelCase\t\t\t,\tsize=UpperCamelCase ) for image in images]\n\n\t\t\t\t\t\t\t\t\t\t\t\tif do_rescale:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tDict\t\t\t\t\t\t\t\t\t\t\t= [self.rescale(image=UpperCamelCase\t\t\t,\tscale=UpperCamelCase ) for image in images]\n\n\t\t\t\t\t\t\t\t\t\t\t\tif do_normalize:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t\t= [self.normalize(image=UpperCamelCase\t\t\t,\tmean=UpperCamelCase\t\t\t,\tstd=UpperCamelCase ) for image in images]\n\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= [to_channel_dimension_format(UpperCamelCase\t\t\t,\tUpperCamelCase ) for image in images]\n\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase :\t\t\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t= {\"\"\"pixel_values\"\"\": images}\n\t\t\t\t\t\t\t\t\t\t\t\treturn BatchFeature(data=UpperCamelCase\t\t\t,\ttensor_type=UpperCamelCase )\n"},"style_context_codestyle":{"kind":"number","value":320,"string":"320"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":744,"cells":{"code":{"kind":"string","value":"\n\n\n\n\"\"\"simple docstring\"\"\"\nfrom typing import TYPE_CHECKING\n\nfrom ...utils import (\n OptionalDependencyNotAvailable,\n _LazyModule,\n is_torch_available,\n)\n\n\n__a =\t\t\t\t{\n \"configuration_falcon\": [\"FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP\", \"FalconConfig\"],\n}\n\ntry:\n\t\t\t\t\t\tif not is_torch_available():\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n\t\t\t\t\t\tpass\nelse:\n\t\t\t\t\t\t__a =\t\t\t\t[\n\t\t\t\t\t\t \"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST\",\n\t\t\t\t\t\t 
\"FalconForCausalLM\",\n\t\t\t\t\t\t \"FalconModel\",\n\t\t\t\t\t\t \"FalconPreTrainedModel\",\n\t\t\t\t\t\t \"FalconForSequenceClassification\",\n\t\t\t\t\t\t \"FalconForTokenClassification\",\n\t\t\t\t\t\t \"FalconForQuestionAnswering\",\n\t\t\t\t\t\t]\n\n\nif TYPE_CHECKING:\n\t\t\t\t\t\tfrom .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig\n\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\tif not is_torch_available():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\n\t\t\t\t\t\texcept OptionalDependencyNotAvailable:\n\t\t\t\t\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tfrom .modeling_falcon import (\n\t\t\t\t\t\t\t\t\t\t\t\t FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,\n\t\t\t\t\t\t\t\t\t\t\t\t FalconForCausalLM,\n\t\t\t\t\t\t\t\t\t\t\t\t FalconForQuestionAnswering,\n\t\t\t\t\t\t\t\t\t\t\t\t FalconForSequenceClassification,\n\t\t\t\t\t\t\t\t\t\t\t\t FalconForTokenClassification,\n\t\t\t\t\t\t\t\t\t\t\t\t FalconModel,\n\t\t\t\t\t\t\t\t\t\t\t\t FalconPreTrainedModel,\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\n\nelse:\n\t\t\t\t\t\timport sys\n\n\t\t\t\t\t\t__a =\t\t\t\t_LazyModule(__name__, globals()[\"__file__\"], _import_structure, module_spec=__spec__)\n"},"code_codestyle":{"kind":"number","value":66,"string":"66"},"style_context":{"kind":"string","value":"\n\n\n\n'''simple docstring'''\n\n\n\n\nimport unittest\n\nimport numpy as np\n\nfrom transformers import DistilBertConfig, is_flax_available\nfrom transformers.testing_utils import require_flax, slow\n\nfrom ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask\n\n\nif is_flax_available():\n\t\t\t\t\t\timport jax.numpy as jnp\n\n\t\t\t\t\t\tfrom transformers.models.distilbert.modeling_flax_distilbert import (\n\t\t\t\t\t\t FlaxDistilBertForMaskedLM,\n\t\t\t\t\t\t FlaxDistilBertForMultipleChoice,\n\t\t\t\t\t\t FlaxDistilBertForQuestionAnswering,\n\t\t\t\t\t\t FlaxDistilBertForSequenceClassification,\n\t\t\t\t\t\t FlaxDistilBertForTokenClassification,\n\t\t\t\t\t\t FlaxDistilBertModel,\n\t\t\t\t\t\t)\n\n\n\n\n\n\n\nclass \t_lowerCAmelCase\t\t\t\t\t\t\t(\t\t\tunittest.TestCase ):\n\n\n\tdef __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase=\"gelu\" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=4 , ):\n\t\t\t\tA_\t:\t\t\t\t\t\tList[Any]\t= parent\n\t\t\t\tA_\t:\t\t\t\t\t\tstr\t= batch_size\n\t\t\t\tA_\t:\t\t\t\t\t\tList[Any]\t= seq_length\n\t\t\t\tA_\t:\t\t\t\t\t\tDict\t= is_training\n\t\t\t\tA_\t:\t\t\t\t\t\tList[Any]\t= use_attention_mask\n\t\t\t\tA_\t:\t\t\t\t\t\tAny\t= use_token_type_ids\n\t\t\t\tA_\t:\t\t\t\t\t\tOptional[int]\t= use_labels\n\t\t\t\tA_\t:\t\t\t\t\t\tTuple\t= vocab_size\n\t\t\t\tA_\t:\t\t\t\t\t\tList[str]\t= hidden_size\n\t\t\t\tA_\t:\t\t\t\t\t\tList[str]\t= num_hidden_layers\n\t\t\t\tA_\t:\t\t\t\t\t\tOptional[Any]\t= num_attention_heads\n\t\t\t\tA_\t:\t\t\t\t\t\tint\t= intermediate_size\n\t\t\t\tA_\t:\t\t\t\t\t\tOptional[Any]\t= hidden_act\n\t\t\t\tA_\t:\t\t\t\t\t\tList[Any]\t= hidden_dropout_prob\n\t\t\t\tA_\t:\t\t\t\t\t\tOptional[Any]\t= attention_probs_dropout_prob\n\t\t\t\tA_\t:\t\t\t\t\t\tAny\t= max_position_embeddings\n\t\t\t\tA_\t:\t\t\t\t\t\tUnion[str, Any]\t= type_vocab_size\n\t\t\t\tA_\t:\t\t\t\t\t\tint\t= type_sequence_label_size\n\t\t\t\tA_\t:\t\t\t\t\t\tAny\t= 
initializer_range\n\t\t\t\tA_\t:\t\t\t\t\t\tList[str]\t= num_choices\n\n\n\tdef _a\t(self\t\t\t\t\t\t\t):\n\t\t\t\tA_\t:\t\t\t\t\t\tList[str]\t= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size\t\t\t\t\t\t\t)\n\n\t\t\t\tA_\t:\t\t\t\t\t\tAny\t= None\n\t\t\t\tif self.use_attention_mask:\n\t\t\t\t\t\t\tA_\t:\t\t\t\t\t\tAny\t= random_attention_mask([self.batch_size, self.seq_length]\t\t\t\t\t\t\t)\n\n\t\t\t\tA_\t:\t\t\t\t\t\tUnion[str, Any]\t= DistilBertConfig(\n\t\t\t\t vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowercase , )\n\n\t\t\t\treturn config, input_ids, attention_mask\n\n\n\tdef _a\t(self\t\t\t\t\t\t\t):\n\t\t\t\tA_\t:\t\t\t\t\t\tList[str]\t= self.prepare_config_and_inputs()\n\t\t\t\tA_, A_, A_\t:\t\t\t\t\t\tstr\t= config_and_inputs\n\t\t\t\tA_\t:\t\t\t\t\t\tAny\t= {\"\"\"input_ids\"\"\": input_ids, \"\"\"attention_mask\"\"\": attention_mask}\n\t\t\t\treturn config, inputs_dict\n\n\n\n\n\n\n\n@require_flax\nclass \t_lowerCAmelCase\t\t\t\t\t\t\t(\t\t\t__UpperCAmelCase ,\t\t\tunittest.TestCase ):\n\t__SCREAMING_SNAKE_CASE :\t\tOptional[Any]\t\t\t\t\t=\t\t\t\t\t(\n\t (\n\t FlaxDistilBertModel,\n\t FlaxDistilBertForMaskedLM,\n\t FlaxDistilBertForMultipleChoice,\n\t FlaxDistilBertForQuestionAnswering,\n\t FlaxDistilBertForSequenceClassification,\n\t FlaxDistilBertForTokenClassification,\n\t FlaxDistilBertForQuestionAnswering,\n\t )\n\t if is_flax_available()\n\t else ()\n\t)\n\n\n\tdef _a\t(self\t\t\t\t\t\t\t):\n\t\t\t\tA_\t:\t\t\t\t\t\tTuple\t= FlaxDistilBertModelTester(self\t\t\t\t\t\t\t)\n\n\n\t@slow\n\tdef _a\t(self\t\t\t\t\t\t\t):\n\t\t\t\tfor model_class_name in self.all_model_classes:\n\t\t\t\t\t\t\tA_\t:\t\t\t\t\t\tUnion[str, Any]\t= model_class_name.from_pretrained(\"\"\"distilbert-base-uncased\"\"\"\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tA_\t:\t\t\t\t\t\tAny\t= model(np.ones((1, 1)\t\t\t\t\t\t\t)\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tself.assertIsNotNone(lowercase\t\t\t\t\t\t\t)\n\n\n\n\n\n\n\n@require_flax\nclass \t_lowerCAmelCase\t\t\t\t\t\t\t(\t\t\tunittest.TestCase ):\n\n\n\t@slow\n\tdef _a\t(self\t\t\t\t\t\t\t):\n\t\t\t\tA_\t:\t\t\t\t\t\tList[str]\t= FlaxDistilBertModel.from_pretrained(\"\"\"distilbert-base-uncased\"\"\"\t\t\t\t\t\t\t)\n\t\t\t\tA_\t:\t\t\t\t\t\tOptional[Any]\t= np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]\t\t\t\t\t\t\t)\n\t\t\t\tA_\t:\t\t\t\t\t\tint\t= np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\t\t\t\t\t\t\t)\n\t\t\t\tA_\t:\t\t\t\t\t\tOptional[int]\t= model(lowercase , attention_mask=lowercase\t\t\t\t\t\t\t)[0]\n\t\t\t\tA_\t:\t\t\t\t\t\tOptional[Any]\t= (1, 11, 768)\n\t\t\t\tself.assertEqual(output.shape , lowercase\t\t\t\t\t\t\t)\n\t\t\t\tA_\t:\t\t\t\t\t\tUnion[str, Any]\t= np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]]\t\t\t\t\t\t\t)\n\n\t\t\t\tself.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowercase , atol=1E-4\t\t\t\t\t\t\t)\t\t\t\t\t\t\t)"},"style_context_codestyle":{"kind":"number","value":206,"string":"206"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":745,"cells":{"code":{"kind":"string","value":"\r\r\r'''simple docstring'''\r\r\r\r\r\r\r\rdef \t\t\t\tlowerCAmelCase__\t( ):\r return [\r a * b * (1000 - a - b)\r for a in 
range(1 ,999 )\r for b in range(lowerCamelCase ,999 )\r if (a * a + b * b == (1000 - a - b) ** 2)\r ][0]\r\r\rif __name__ == \"__main__\":\r print(f\"\"\"{solution() = }\"\"\")"},"code_codestyle":{"kind":"number","value":359,"string":"359"},"style_context":{"kind":"string","value":"\r\r\r'''simple docstring'''\r\r\r\r\r\r\r\rfrom typing import TYPE_CHECKING\r\rfrom ...utils import (\r OptionalDependencyNotAvailable,\r _LazyModule,\r is_tf_available,\r is_torch_available,\r is_vision_available,\r)\r\r\rA\t\t\t\t:\t\t\t\t\tstr \t\t\t\t\t\t=\t\t\t{\r '''configuration_blip''': [\r '''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',\r '''BlipConfig''',\r '''BlipTextConfig''',\r '''BlipVisionConfig''',\r ],\r '''processing_blip''': ['''BlipProcessor'''],\r}\r\rtry:\r if not is_vision_available():\r raise OptionalDependencyNotAvailable()\rexcept OptionalDependencyNotAvailable:\r pass\relse:\r A\t\t\t\t:\t\t\t\t\tOptional[Any] \t\t\t\t\t\t=\t\t\t['''BlipImageProcessor''']\r\r\rtry:\r if not is_torch_available():\r raise OptionalDependencyNotAvailable()\rexcept OptionalDependencyNotAvailable:\r pass\relse:\r A\t\t\t\t:\t\t\t\t\tList[Any] \t\t\t\t\t\t=\t\t\t[\r '''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',\r '''BlipModel''',\r '''BlipPreTrainedModel''',\r '''BlipForConditionalGeneration''',\r '''BlipForQuestionAnswering''',\r '''BlipVisionModel''',\r '''BlipTextModel''',\r '''BlipForImageTextRetrieval''',\r ]\r\rtry:\r if not is_tf_available():\r raise OptionalDependencyNotAvailable()\rexcept OptionalDependencyNotAvailable:\r pass\relse:\r A\t\t\t\t:\t\t\t\t\tUnion[str, Any] \t\t\t\t\t\t=\t\t\t[\r '''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',\r '''TFBlipModel''',\r '''TFBlipPreTrainedModel''',\r '''TFBlipForConditionalGeneration''',\r '''TFBlipForQuestionAnswering''',\r '''TFBlipVisionModel''',\r '''TFBlipTextModel''',\r '''TFBlipForImageTextRetrieval''',\r ]\r\rif TYPE_CHECKING:\r from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig\r from .processing_blip import BlipProcessor\r\r try:\r if not is_vision_available():\r raise OptionalDependencyNotAvailable()\r except OptionalDependencyNotAvailable:\r pass\r else:\r from .image_processing_blip import BlipImageProcessor\r\r try:\r if not is_torch_available():\r raise OptionalDependencyNotAvailable()\r except OptionalDependencyNotAvailable:\r pass\r else:\r from .modeling_blip import (\r BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,\r BlipForConditionalGeneration,\r BlipForImageTextRetrieval,\r BlipForQuestionAnswering,\r BlipModel,\r BlipPreTrainedModel,\r BlipTextModel,\r BlipVisionModel,\r )\r\r try:\r if not is_tf_available():\r raise OptionalDependencyNotAvailable()\r except OptionalDependencyNotAvailable:\r pass\r else:\r from .modeling_tf_blip import (\r TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,\r TFBlipForConditionalGeneration,\r TFBlipForImageTextRetrieval,\r TFBlipForQuestionAnswering,\r TFBlipModel,\r TFBlipPreTrainedModel,\r TFBlipTextModel,\r TFBlipVisionModel,\r )\r\relse:\r import sys\r\r A\t\t\t\t:\t\t\t\t\tOptional[int] \t\t\t\t\t\t=\t\t\t_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)\r\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":227,"string":"227"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":746,"cells":{"code":{"kind":"string","value":"\rimport csv\r\rimport tweepy\r\r# Twitter API credentials\rSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:List[str]\t\t\t\t\t\t\t =\t\t\t\t''\rSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:List[str]\t\t\t\t\t\t\t 
=\t\t\t\t''\rSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:int\t\t\t\t\t\t\t =\t\t\t\t''\rSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:Dict\t\t\t\t\t\t\t =\t\t\t\t''\r\r\rdef \t\t\t\t\t\tUpperCAmelCase\t\t\t(\ta_\t\t\t\t\t\t)\t\t->\tNone:\r\r\r\r\r\r\r\r\t\t\t\t\"\"\"simple docstring\"\"\"\r\r\r\r\t\t\t\t__A \t\t\t= tweepy.OAuthHandler(a_\t\t, a_\t\t\t\t\t\t)\r\t\t\t\tauth.set_access_token(a_\t\t, a_\t\t\t\t\t\t)\r\t\t\t\t__A \t\t\t= tweepy.API(a_\t\t\t\t\t\t)\r\r\t\t\t\t# initialize a list to hold all the tweepy Tweets\r\t\t\t\t__A \t\t\t= []\r\r\t\t\t\t# make initial request for most recent tweets (200 is the maximum allowed count)\r\t\t\t\t__A \t\t\t= api.user_timeline(screen_name=a_\t\t, count=2_0_0\t\t\t\t\t\t)\r\r\t\t\t\t# save most recent tweets\r\t\t\t\talltweets.extend(a_\t\t\t\t\t\t)\r\r\t\t\t\t# save the id of the oldest tweet less one\r\t\t\t\t__A \t\t\t= alltweets[-1].id - 1\r\r\t\t\t\t# keep grabbing tweets until there are no tweets left to grab\r\t\t\t\twhile len(a_\t\t\t\t\t\t) > 0:\r\t\t\t\t\t\t\t\tprint(F'''getting tweets before {oldest}'''\t\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t# all subsequent requests use the max_id param to prevent duplicates\r\t\t\t\t\t\t\t\t__A \t\t\t= api.user_timeline(\r\t\t\t\t\t\t\t\t screen_name=a_\t\t, count=2_0_0\t\t, max_id=a_\t\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t# save most recent tweets\r\t\t\t\t\t\t\t\talltweets.extend(a_\t\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\t# update the id of the oldest tweet less one\r\t\t\t\t\t\t\t\t__A \t\t\t= alltweets[-1].id - 1\r\r\t\t\t\t\t\t\t\tprint(F'''...{len(a_\t\t\t\t\t\t)} tweets downloaded so far'''\t\t\t\t\t\t)\r\r\t\t\t\t# transform the tweepy tweets into a 2D array that will populate the csv\r\t\t\t\t__A \t\t\t= [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]\r\r\t\t\t\t# write the csv\r\t\t\t\twith open(F'''new_{screen_name}_tweets.csv'''\t\t, \"w\"\t\t\t\t\t\t) as f:\r\t\t\t\t\t\t\t\t__A \t\t\t= csv.writer(a_\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\twriter.writerow([\"id\", \"created_at\", \"text\"]\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\twriter.writerows(a_\t\t\t\t\t\t)\r\r\rif __name__ == \"__main__\":\r\t\t\t\t\t# pass in the username of the account you want to download\r\t\t\t\t\tget_all_tweets('FirePing32')\r"},"code_codestyle":{"kind":"number","value":15,"string":"15"},"style_context":{"kind":"string","value":"\rimport math\r\r\rdef \t\t\t\t\t\tUpperCAmelCase\t\t\t(\ta_\t\t, a_ = 0\t\t, a_ = 0\t\t\t\t\t\t)\t\t->\tlist:\r\r\r\r\r\r\r\r\t\t\t\t\"\"\"simple docstring\"\"\"\r\r\r\r\t\t\t\t__A \t\t\t= end or len(a_\t\t\t\t\t\t)\r\t\t\t\tfor i in range(a_\t\t, a_\t\t\t\t\t\t):\r\t\t\t\t\t\t\t\t__A \t\t\t= i\r\t\t\t\t\t\t\t\t__A \t\t\t= array[i]\r\t\t\t\t\t\t\t\twhile temp_index != start and temp_index_value < array[temp_index - 1]:\r\t\t\t\t\t\t\t\t\t\t\t\t__A \t\t\t= array[temp_index - 1]\r\t\t\t\t\t\t\t\t\t\t\t\ttemp_index -= 1\r\t\t\t\t\t\t\t\t__A \t\t\t= temp_index_value\r\t\t\t\treturn array\r\r\rdef \t\t\t\t\t\tUpperCAmelCase\t\t\t(\ta_\t\t, a_\t\t, a_\t\t\t\t\t\t)\t\t->\tNone: # Max Heap\r\r\r\r\r\r\r\r\t\t\t\t\"\"\"simple docstring\"\"\"\r\r\r\r\t\t\t\t__A \t\t\t= index\r\t\t\t\t__A \t\t\t= 2 * index + 1 # Left Node\r\t\t\t\t__A \t\t\t= 2 * index + 2 # Right Node\r\r\t\t\t\tif left_index < heap_size and array[largest] < array[left_index]:\r\t\t\t\t\t\t\t\t__A \t\t\t= left_index\r\r\t\t\t\tif right_index < heap_size and array[largest] < array[right_index]:\r\t\t\t\t\t\t\t\t__A \t\t\t= right_index\r\r\t\t\t\tif largest != index:\r\t\t\t\t\t\t\t\t__A\t\t\t,\t\t\t\t\t\t\t__A \t\t\t= array[largest], 
array[index]\r\t\t\t\t\t\t\t\theapify(a_\t\t, a_\t\t, a_\t\t\t\t\t\t)\r\r\rdef \t\t\t\t\t\tUpperCAmelCase\t\t\t(\ta_\t\t\t\t\t\t)\t\t->\tlist:\r\r\r\r\r\r\r\r\t\t\t\t\"\"\"simple docstring\"\"\"\r\r\r\r\t\t\t\t__A \t\t\t= len(a_\t\t\t\t\t\t)\r\r\t\t\t\tfor i in range(n // 2\t\t, -1\t\t, -1\t\t\t\t\t\t):\r\t\t\t\t\t\t\t\theapify(a_\t\t, a_\t\t, a_\t\t\t\t\t\t)\r\r\t\t\t\tfor i in range(n - 1\t\t, 0\t\t, -1\t\t\t\t\t\t):\r\t\t\t\t\t\t\t\t__A\t\t\t,\t\t\t\t\t\t\t__A \t\t\t= array[0], array[i]\r\t\t\t\t\t\t\t\theapify(a_\t\t, 0\t\t, a_\t\t\t\t\t\t)\r\r\t\t\t\treturn array\r\r\rdef \t\t\t\t\t\tUpperCAmelCase\t\t\t(\ta_\t\t, a_\t\t, a_\t\t, a_\t\t\t\t\t\t)\t\t->\tint:\r\r\r\r\r\r\r\r\t\t\t\t\"\"\"simple docstring\"\"\"\r\r\r\r\t\t\t\tif (array[first_index] > array[middle_index]) != (\r\t\t\t\t array[first_index] > array[last_index]\r\t\t\t\t):\r\t\t\t\t\t\t\t\treturn array[first_index]\r\t\t\t\telif (array[middle_index] > array[first_index]) != (\r\t\t\t\t array[middle_index] > array[last_index]\r\t\t\t\t):\r\t\t\t\t\t\t\t\treturn array[middle_index]\r\t\t\t\telse:\r\t\t\t\t\t\t\t\treturn array[last_index]\r\r\rdef \t\t\t\t\t\tUpperCAmelCase\t\t\t(\ta_\t\t, a_\t\t, a_\t\t, a_\t\t\t\t\t\t)\t\t->\tint:\r\r\r\r\r\r\r\r\t\t\t\t\"\"\"simple docstring\"\"\"\r\r\r\r\t\t\t\t__A \t\t\t= low\r\t\t\t\t__A \t\t\t= high\r\t\t\t\twhile True:\r\t\t\t\t\t\t\t\twhile array[i] < pivot:\r\t\t\t\t\t\t\t\t\t\t\t\ti += 1\r\t\t\t\t\t\t\t\tj -= 1\r\t\t\t\t\t\t\t\twhile pivot < array[j]:\r\t\t\t\t\t\t\t\t\t\t\t\tj -= 1\r\t\t\t\t\t\t\t\tif i >= j:\r\t\t\t\t\t\t\t\t\t\t\t\treturn i\r\t\t\t\t\t\t\t\t__A\t\t\t,\t\t\t\t\t\t\t__A \t\t\t= array[j], array[i]\r\t\t\t\t\t\t\t\ti += 1\r\r\rdef \t\t\t\t\t\tUpperCAmelCase\t\t\t(\ta_\t\t\t\t\t\t)\t\t->\tlist:\r\r\r\r\r\r\r\r\t\t\t\t\"\"\"simple docstring\"\"\"\r\r\r\r\t\t\t\tif len(a_\t\t\t\t\t\t) == 0:\r\t\t\t\t\t\t\t\treturn array\r\t\t\t\t__A \t\t\t= 2 * math.ceil(math.loga(len(a_\t\t\t\t\t\t)\t\t\t\t\t\t)\t\t\t\t\t\t)\r\t\t\t\t__A \t\t\t= 1_6\r\t\t\t\treturn intro_sort(a_\t\t, 0\t\t, len(a_\t\t\t\t\t\t)\t\t, a_\t\t, a_\t\t\t\t\t\t)\r\r\rdef \t\t\t\t\t\tUpperCAmelCase\t\t\t(\ta_\t\t, a_\t\t, a_\t\t, a_\t\t, a_\t\t\t\t\t\t)\t\t->\tlist:\r\r\r\r\r\r\r\r\t\t\t\t\"\"\"simple docstring\"\"\"\r\r\r\r\t\t\t\twhile end - start > size_threshold:\r\t\t\t\t\t\t\t\tif max_depth == 0:\r\t\t\t\t\t\t\t\t\t\t\t\treturn heap_sort(a_\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tmax_depth -= 1\r\t\t\t\t\t\t\t\t__A \t\t\t= median_of_a(a_\t\t, a_\t\t, start + ((end - start) // 2) + 1\t\t, end - 1\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\t__A \t\t\t= partition(a_\t\t, a_\t\t, a_\t\t, a_\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tintro_sort(a_\t\t, a_\t\t, a_\t\t, a_\t\t, a_\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\t__A \t\t\t= p\r\t\t\t\treturn insertion_sort(a_\t\t, a_\t\t, a_\t\t\t\t\t\t)\r\r\rif __name__ == \"__main__\":\r\t\t\t\t\timport doctest\r\r\t\t\t\t\tdoctest.testmod()\r\r\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:List[Any]\t\t\t\t\t\t\t =\t\t\t\tinput('Enter numbers separated by a comma : ').strip()\r\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:str\t\t\t\t\t\t\t =\t\t\t\t[float(item) for item in user_input.split(',')]\r\t\t\t\t\tprint(sort(unsorted))\r"},"style_context_codestyle":{"kind":"number","value":15,"string":"15"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":747,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import List, Union\r\n\r\nfrom ..utils import (\r\n add_end_docstrings,\r\n is_tf_available,\r\n is_torch_available,\r\n 
 is_vision_available,
 logging,
 requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
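# A minimal usage sketch (assumptions: the `pipeline` factory from transformers and a
# public ViT checkpoint; neither is defined in this module):
#
# from transformers import pipeline
#
# classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
# predictions = classifier("path/to/image.png", top_k=3)
# # -> [{"score": ..., "label": ...}, ...] sorted by descending score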
docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\n\r\nimport torch\r\n\r\nfrom diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\tlowercase__\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\t\targparse.ArgumentParser()\r\n\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--checkpoint_path\"\"\", default=None, type=str, required=True, help=\"\"\"Path to the checkpoint to convert.\"\"\"\r\n\t\t\t)\r\n\t\t\t# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--original_config_file\"\"\",\r\n\t\t\t default=None,\r\n\t\t\t type=str,\r\n\t\t\t help=\"\"\"The YAML config file corresponding to the original architecture.\"\"\",\r\n\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--num_in_channels\"\"\",\r\n\t\t\t default=None,\r\n\t\t\t type=int,\r\n\t\t\t help=\"\"\"The number of input channels. If `None` number of input channels will be automatically inferred.\"\"\",\r\n\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--scheduler_type\"\"\",\r\n\t\t\t default=\"\"\"pndm\"\"\",\r\n\t\t\t type=str,\r\n\t\t\t help=\"\"\"Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']\"\"\",\r\n\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--pipeline_type\"\"\",\r\n\t\t\t default=None,\r\n\t\t\t type=str,\r\n\t\t\t help=(\r\n\t\t\t \"\"\"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'\"\"\"\r\n\t\t\t \"\"\". If `None` pipeline will be automatically inferred.\"\"\"\r\n\t\t\t ),\r\n\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--image_size\"\"\",\r\n\t\t\t default=None,\r\n\t\t\t type=int,\r\n\t\t\t help=(\r\n\t\t\t \"\"\"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2\"\"\"\r\n\t\t\t \"\"\" Base. Use 768 for Stable Diffusion v2.\"\"\"\r\n\t\t\t ),\r\n\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--prediction_type\"\"\",\r\n\t\t\t default=None,\r\n\t\t\t type=str,\r\n\t\t\t help=(\r\n\t\t\t \"\"\"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable\"\"\"\r\n\t\t\t \"\"\" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.\"\"\"\r\n\t\t\t ),\r\n\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--extract_ema\"\"\",\r\n\t\t\t action=\"\"\"store_true\"\"\",\r\n\t\t\t help=(\r\n\t\t\t \"\"\"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights\"\"\"\r\n\t\t\t \"\"\" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield\"\"\"\r\n\t\t\t \"\"\" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.\"\"\"\r\n\t\t\t ),\r\n\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--upcast_attention\"\"\",\r\n\t\t\t action=\"\"\"store_true\"\"\",\r\n\t\t\t help=(\r\n\t\t\t \"\"\"Whether the attention computation should always be upcasted. 
			parser.add_argument(
			 "--upcast_attention",
			 action="store_true",
			 help=(
			 "Whether the attention computation should always be upcasted. This is necessary when running stable"
			 " diffusion 2.1."
			 ),
			)
			parser.add_argument(
			 "--from_safetensors",
			 action="store_true",
			 help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
			)
			parser.add_argument(
			 "--to_safetensors",
			 action="store_true",
			 help="Whether to store pipeline in safetensors format or not.",
			)
			parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
			parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
			parser.add_argument(
			 "--stable_unclip",
			 type=str,
			 default=None,
			 required=False,
			 help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
			)
			parser.add_argument(
			 "--stable_unclip_prior",
			 type=str,
			 default=None,
			 required=False,
			 help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
			)
			parser.add_argument(
			 "--clip_stats_path",
			 type=str,
			 help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
			 required=False,
			)
			parser.add_argument(
			 "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
			)
			parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
			parser.add_argument(
			 "--vae_path",
			 type=str,
			 default=None,
			 required=False,
			 help="Set to a path, hub id to an already converted vae to not convert it again.",
			)
			args = parser.parse_args()

			pipe = download_from_original_stable_diffusion_ckpt(
			 checkpoint_path=args.checkpoint_path,
			 original_config_file=args.original_config_file,
			 image_size=args.image_size,
			 prediction_type=args.prediction_type,
			 model_type=args.pipeline_type,
			 extract_ema=args.extract_ema,
			 scheduler_type=args.scheduler_type,
			 num_in_channels=args.num_in_channels,
			 upcast_attention=args.upcast_attention,
			 from_safetensors=args.from_safetensors,
			 device=args.device,
			 stable_unclip=args.stable_unclip,
			 stable_unclip_prior=args.stable_unclip_prior,
			 clip_stats_path=args.clip_stats_path,
			 controlnet=args.controlnet,
			 vae_path=args.vae_path,
			)

			if args.half:
						pipe.to(torch_dtype=torch.float16)

			if args.controlnet:
						# only save the controlnet model
						pipe.controlnet.save_pretrained(args.dump_path, 
safe_serialization=args.to_safetensors)\r\n\t\t\telse:\r\n\t\t\t\t\t\tpipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)"},"style_context_codestyle":{"kind":"number","value":12,"string":"12"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":748,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n_A\t:\t\t\tList[str] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''\r\n\r\n\r\n\r\n\r\n\r\ndef SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bytes:\r\n\t\t\t\t\t\t\t# Make sure the supplied data is a bytes-like object\r\n\t\t\t\t\t\t\tif not isinstance(UpperCamelCase\t\t\t\t,\t\t\tUpperCamelCase ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tDict = f'''a bytes-like object is required, not \\'{data.__class__.__name__}\\''''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\traise TypeError(UpperCamelCase )\r\n\r\n\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tint = \"\"\"\"\"\".join(bin(UpperCamelCase )[2:].zfill(8 ) for byte in data )\r\n\r\n\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tint = len(UpperCamelCase ) % 6 != 0\r\n\r\n\t\t\t\t\t\t\tif padding_needed:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# The padding that will be added later\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tOptional[Any] = b\"\"\"=\"\"\" * ((6 - len(UpperCamelCase ) % 6) // 2)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Append binary_stream with arbitrary binary digits (0's by default) to make its\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# length a multiple of 6.\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tbinary_stream += \"0\" * (6 - len(UpperCamelCase ) % 6)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tOptional[int] = b\"\"\"\"\"\"\r\n\r\n\t\t\t\t\t\t\t# Encode every 6 binary digits to their corresponding Base64 character\r\n\t\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t\t \"\".join(\r\n\t\t\t\t\t\t\t B64_CHARSET[int(binary_stream[index : index + 6]\t\t\t\t,\t\t\t2 )]\r\n\t\t\t\t\t\t\t for index in range(0\t\t\t\t,\t\t\tlen(UpperCamelCase )\t\t\t\t,\t\t\t6 ) ).encode()\r\n\t\t\t\t\t\t\t + padding\r\n\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\ndef SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bytes:\r\n\t\t\t\t\t\t\t# Make sure encoded_data is either a string or a bytes-like object\r\n\t\t\t\t\t\t\tif not isinstance(UpperCamelCase\t\t\t\t,\t\t\tUpperCamelCase ) and not isinstance(UpperCamelCase\t\t\t\t,\t\t\tUpperCamelCase ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tint = (\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"argument should be a bytes-like object or ASCII string, \"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t f'''not \\'{encoded_data.__class__.__name__}\\''''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\traise TypeError(UpperCamelCase )\r\n\r\n\t\t\t\t\t\t\t# In case encoded_data is a bytes-like object, make sure it contains only\r\n\t\t\t\t\t\t\t# ASCII characters so we convert it to a string object\r\n\t\t\t\t\t\t\tif isinstance(UpperCamelCase\t\t\t\t,\t\t\tUpperCamelCase ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tstr = encoded_data.decode(\"\"\"utf-8\"\"\" )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\texcept UnicodeDecodeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"base64 encoded data should only contain ASCII characters\"\"\" )\r\n\r\n\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tTuple = encoded_data.count(\"\"\"=\"\"\" )\r\n\r\n\t\t\t\t\t\t\t# Check if the encoded string contains non 
base64 characters\r\n\t\t\t\t\t\t\tif padding:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tassert all(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t char in B64_CHARSET for char in encoded_data[:-padding] ), \"Invalid base64 character(s) found.\"\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tassert all(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t char in B64_CHARSET for char in encoded_data ), \"Invalid base64 character(s) found.\"\r\n\r\n\t\t\t\t\t\t\t# Check the padding\r\n\t\t\t\t\t\t\tassert len(UpperCamelCase ) % 4 == 0 and padding < 3, \"Incorrect padding\"\r\n\r\n\t\t\t\t\t\t\tif padding:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Remove padding if there is one\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tAny = encoded_data[:-padding]\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tint = \"\"\"\"\"\".join(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t bin(B64_CHARSET.index(UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tTuple = \"\"\"\"\"\".join(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t bin(B64_CHARSET.index(UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )\r\n\r\n\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tList[Any] = [\r\n\t\t\t\t\t\t\t int(binary_stream[index : index + 8]\t\t\t\t,\t\t\t2 )\r\n\t\t\t\t\t\t\t for index in range(0\t\t\t\t,\t\t\tlen(UpperCamelCase )\t\t\t\t,\t\t\t8 )\r\n\t\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t\treturn bytes(UpperCamelCase )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\timport doctest\r\n\r\n\t\t\tdoctest.testmod()\r\n\r\n"},"code_codestyle":{"kind":"number","value":41,"string":"41"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass _lowercase\t:\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\tdef __init__(\t\t\t\t\t\t\tself:\t\t\t\tTuple\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__:\t\t\t\tlist[int] ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tUnion[str, Any] = len(UpperCamelCase__ )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tUnion[str, Any] = [0] * len_array\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif len_array > 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tUnion[str, Any] = array[0]\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor i in range(1\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__ ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tTuple = self.prefix_sum[i - 1] + array[i]\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\tdef \t\tlowerCamelCase_\t\t\t\t(\t\t\t\t\t\t\tself:\t\t\t\tTuple\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__:\t\t\t\tint\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__:\t\t\t\tint ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif start == 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn self.prefix_sum[end]\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn self.prefix_sum[end] - self.prefix_sum[start - 1]\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\tdef \t\tlowerCamelCase_\t\t\t\t(\t\t\t\t\t\t\tself:\t\t\t\tOptional[int]\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__:\t\t\t\tint ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t:\tDict = {0}\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor sum_item in self.prefix_sum:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif sum_item - target_sum in sums:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn True\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsums.add(UpperCamelCase__ )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn False\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\timport 
doctest\r\n\r\n\t\t\tdoctest.testmod()\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":41,"string":"41"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":749,"cells":{"code":{"kind":"string","value":"\r\r\r\r\r\r'''simple docstring'''\r\rimport hashlib\rimport unittest\r\rfrom transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available\rfrom transformers.pipelines import DepthEstimationPipeline, pipeline\rfrom transformers.testing_utils import (\r is_pipeline_test,\r nested_simplify,\r require_tf,\r require_timm,\r require_torch,\r require_vision,\r slow,\r)\r\rfrom .test_pipelines_common import ANY\r\r\rif is_torch_available():\r import torch\r\rif is_vision_available():\r from PIL import Image\relse:\r class __lowerCAmelCase\t\t\t\t\t:\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r @staticmethod\r def snake_case__\t\t\t\t\t\t(\t\t\t\t\t\t\t*lowerCAmelCase__ :\t\tTuple\t,\t\t\t**lowerCAmelCase__ :\t\tAny\t\t\t\t\t) ->\t\t\t\t\tList[str]:\r\r '''simple docstring'''\r\r\r\r\r pass\r\r\r\r\r\r\r\rdef a__ ( lowercase :\t\t\t\t\tImage\t\t\t\t)\t\t->\t\tstr:\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r _UpperCamelCase =\thashlib.mda(image.tobytes()\t\t\t\t)\r return m.hexdigest()\r\r\r\r\r@is_pipeline_test\r@require_vision\r@require_timm\r@require_torch\rclass __lowerCAmelCase\t\t\t\t\t( unittest.TestCase\t\t\t):\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r _snake_case :\t\t\t\t\tstr\t\t\t\t\t\t = MODEL_FOR_DEPTH_ESTIMATION_MAPPING\r\r\r\r\r\r def snake_case__\t\t\t\t\t\t(\t\t\t\t\t\t\tself :\t\tTuple\t,\t\t\tlowerCAmelCase__ :\t\tint\t,\t\t\tlowerCAmelCase__ :\t\tOptional[int]\t,\t\t\tlowerCAmelCase__ :\t\tUnion[str, Any]\t\t\t\t\t) ->\t\t\t\t\tOptional[int]:\r\r '''simple docstring'''\r\r\r\r\r _UpperCamelCase =\tDepthEstimationPipeline(model=lowerCAmelCase__\t,\t\t\timage_processor=lowerCAmelCase__\t\t\t\t\t)\r return depth_estimator, [\r \"./tests/fixtures/tests_samples/COCO/000000039769.png\",\r \"./tests/fixtures/tests_samples/COCO/000000039769.png\",\r ]\r\r\r\r\r\r def snake_case__\t\t\t\t\t\t(\t\t\t\t\t\t\tself :\t\tOptional[Any]\t,\t\t\tlowerCAmelCase__ :\t\tUnion[str, Any]\t,\t\t\tlowerCAmelCase__ :\t\tList[Any]\t\t\t\t\t) ->\t\t\t\t\tTuple:\r\r '''simple docstring'''\r\r\r\r\r _UpperCamelCase =\tdepth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png'''\t\t\t\t\t)\r self.assertEqual({'''predicted_depth''': ANY(torch.Tensor\t\t\t\t\t), '''depth''': ANY(Image.Image\t\t\t\t\t)}\t,\t\t\tlowerCAmelCase__\t\t\t\t\t)\r import datasets\r\r _UpperCamelCase =\tdatasets.load_dataset('''hf-internal-testing/fixtures_image_utils'''\t,\t\t\t'''image'''\t,\t\t\tsplit='''test'''\t\t\t\t\t)\r _UpperCamelCase =\tdepth_estimator(\r [\r Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''\t\t\t\t\t),\r '''http://images.cocodataset.org/val2017/000000039769.jpg''',\r # RGBA\r dataset[0]['''file'''],\r # LA\r dataset[1]['''file'''],\r # L\r dataset[2]['''file'''],\r ]\t\t\t\t\t)\r self.assertEqual(\r [\r {'''predicted_depth''': ANY(torch.Tensor\t\t\t\t\t), '''depth''': ANY(Image.Image\t\t\t\t\t)},\r {'''predicted_depth''': ANY(torch.Tensor\t\t\t\t\t), '''depth''': ANY(Image.Image\t\t\t\t\t)},\r {'''predicted_depth''': ANY(torch.Tensor\t\t\t\t\t), '''depth''': ANY(Image.Image\t\t\t\t\t)},\r {'''predicted_depth''': ANY(torch.Tensor\t\t\t\t\t), '''depth''': ANY(Image.Image\t\t\t\t\t)},\r {'''predicted_depth''': ANY(torch.Tensor\t\t\t\t\t), '''depth''': ANY(Image.Image\t\t\t\t\t)},\r 
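                 # One {"predicted_depth": tensor, "depth": PIL image} dict is
                 # expected per input above: the URL download and the RGBA, LA
                 # and L mode fixture files all share the same preprocessing path.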
]\t,\t\t\tlowerCAmelCase__\t,\t\t\t)\r\r\r\r\r\r @require_tf\r @unittest.skip('''Depth estimation is not implemented in TF'''\t\t\t\t\t)\r def snake_case__\t\t\t\t\t\t(\t\t\t\t\t\t\tself :\t\tOptional[int]\t\t\t\t\t) ->\t\t\t\t\tint:\r\r '''simple docstring'''\r\r\r\r\r pass\r\r\r\r\r\r @slow\r @require_torch\r def snake_case__\t\t\t\t\t\t(\t\t\t\t\t\t\tself :\t\tOptional[Any]\t\t\t\t\t) ->\t\t\t\t\tint:\r\r '''simple docstring'''\r\r\r\r\r _UpperCamelCase =\t'''Intel/dpt-large'''\r _UpperCamelCase =\tpipeline('''depth-estimation'''\t,\t\t\tmodel=lowerCAmelCase__\t\t\t\t\t)\r _UpperCamelCase =\tdepth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg'''\t\t\t\t\t)\r _UpperCamelCase =\thashimage(outputs['''depth''']\t\t\t\t\t)\r\r # This seems flaky.\r # self.assertEqual(outputs[\"depth\"], \"1a39394e282e9f3b0741a90b9f108977\")\r self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()\t\t\t\t\t)\t,\t\t\t29.304\t\t\t\t\t)\r self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()\t\t\t\t\t)\t,\t\t\t2.662\t\t\t\t\t)\r\r\r\r\r\r\r\r @require_torch\r def snake_case__\t\t\t\t\t\t(\t\t\t\t\t\t\tself :\t\tAny\t\t\t\t\t) ->\t\t\t\t\tUnion[str, Any]:\r\r '''simple docstring'''\r\r\r\r\r self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT'''\t\t\t\t\t)\r\r\r\r"},"code_codestyle":{"kind":"number","value":287,"string":"287"},"style_context":{"kind":"string","value":"\r\r\r\r\r\r'''simple docstring'''\r\rimport math\r\r\r\r\r\r\r\rdef a__ ( lowercase :\t\t\t\t\tfloat, lowercase :\t\t\t\t\tfloat\t\t\t\t)\t\t->\t\tfloat:\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r if initial_intensity < 0:\r raise ValueError('''The value of intensity cannot be negative'''\t\t\t\t)\r # handling of negative values of initial intensity\r if angle < 0 or angle > 360:\r raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees'''\t\t\t\t)\r # handling of values out of allowed range\r return initial_intensity * (math.cos(math.radians(lowercase\t\t\t\t)\t\t\t\t) ** 2)\r\r\rif __name__ == \"__main__\":\r import doctest\r\r doctest.testmod(name='malus_law')\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":287,"string":"287"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":750,"cells":{"code":{"kind":"string","value":"\r\r\r\"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\rfrom ...configuration_utils import PretrainedConfig\rfrom ...utils import logging\r\r\rlowerCAmelCase__ \t\t\t\t= logging.get_logger(__name__)\r\rlowerCAmelCase__ \t\t\t\t= {\r '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',\r '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',\r}\r\r\rclass SCREAMING_SNAKE_CASE__\t\t\t\t(\t\tlowercase\t\t\t\t):\r \"\"\"simple docstring\"\"\"\r\r\r a :\t\t\t\tOptional[int] =\"markuplm\"\r\r def __init__(\t\t\t\t\t\tself\t\t, snake_case__=30_522\t\t, snake_case__=768\t\t, snake_case__=12\t\t, snake_case__=12\t\t, snake_case__=3_072\t\t, snake_case__=\"gelu\"\t\t, snake_case__=0.1\t\t, snake_case__=0.1\t\t, snake_case__=512\t\t, snake_case__=2\t\t, snake_case__=0.02\t\t, snake_case__=1e-12\t\t, snake_case__=0\t\t, snake_case__=0\t\t, snake_case__=2\t\t, snake_case__=256\t\t, snake_case__=1_024\t\t, snake_case__=216\t\t, snake_case__=1_001\t\t, snake_case__=32\t\t, snake_case__=50\t\t, snake_case__=\"absolute\"\t\t, snake_case__=True\t\t, snake_case__=None\t\t, **snake_case__\t\t, 
):\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r super().__init__(\r pad_token_id=snake_case__\t\t, bos_token_id=snake_case__\t\t, eos_token_id=snake_case__\t\t, **snake_case__\t\t, )\r lowerCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tvocab_size\r lowerCAmelCase : Tuple\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\thidden_size\r lowerCAmelCase : Any\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tnum_hidden_layers\r lowerCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tnum_attention_heads\r lowerCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\thidden_act\r lowerCAmelCase : List[Any]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tintermediate_size\r lowerCAmelCase : int\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\thidden_dropout_prob\r lowerCAmelCase : int\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tattention_probs_dropout_prob\r lowerCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tmax_position_embeddings\r lowerCAmelCase : Tuple\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\ttype_vocab_size\r lowerCAmelCase : int\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tinitializer_range\r lowerCAmelCase : Tuple\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tlayer_norm_eps\r lowerCAmelCase : str\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tposition_embedding_type\r lowerCAmelCase : List[str]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tuse_cache\r lowerCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tclassifier_dropout\r # additional properties\r lowerCAmelCase : Tuple\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tmax_depth\r lowerCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tmax_xpath_tag_unit_embeddings\r lowerCAmelCase : List[str]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tmax_xpath_subs_unit_embeddings\r lowerCAmelCase : List[Any]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\ttag_pad_id\r lowerCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\tsubs_pad_id\r lowerCAmelCase : List[str]\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\txpath_unit_hidden_size\r\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":108,"string":"108"},"style_context":{"kind":"string","value":"\r\r# Algorithm for the pigeonhole sorting\r\r\r\r\rdef _UpperCAmelCase\t\t\t\t\t\t\t(\t\ta__):\r\r\t\t\t'''simple docstring'''\r\r\r\r\t\t\ta_\t\t:\t\tList[Any] \t\t\t\t= min(a__) # min() finds the minimum value\r\t\t\ta_\t\t:\t\tList[str] \t\t\t\t= max(a__) # max() finds the maximum value\r\r\t\t\ta_\t\t:\t\tstr \t\t\t\t= max_val - min_val + 1 # size is difference of max and min values plus one\r\r\t\t\t# list of pigeonholes of size equal to the variable size\r\t\t\ta_\t\t:\t\tAny \t\t\t\t= [0] * size\r\r\t\t\t# Populate the pigeonholes.\r\t\t\tfor x in a:\r\t\t\t\t\t\tassert isinstance(a__ , a__), \"integers only please\"\r\t\t\t\t\t\tholes[x - min_val] += 1\r\r\t\t\t# Putting the elements back into the array in an order.\r\t\t\ta_\t\t:\t\tTuple \t\t\t\t= 0\r\t\t\tfor count in range(a__):\r\t\t\t\t\t\twhile holes[count] > 0:\r\t\t\t\t\t\t\t\t\tholes[count] -= 1\r\t\t\t\t\t\t\t\t\ta_\t\t:\t\tOptional[Any] \t\t\t\t= count + min_val\r\t\t\t\t\t\t\t\t\ti += 1\r\r\rdef _UpperCAmelCase\t\t\t\t\t\t\t(\t\t):\r\r\t\t\t'''simple docstring'''\r\r\r\r\t\t\ta_\t\t:\t\tList[Any] \t\t\t\t= [8, 3, 2, 7, 4, 6, 8]\r\t\t\tpigeonhole_sort(a__)\r\t\t\tprint(\"\"\"Sorted order is:\"\"\" , \"\"\" \"\"\".join(a__))\r\r\rif __name__ == \"__main__\":\r\tmain()\r\r\r"},"style_context_codestyle":{"kind":"number","value":248,"string":"248"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":751,"cells":{"code":{"kind":"string","value":"\r\r\r\r\r\rimport inspect\rimport tempfile\rimport unittest\r\rfrom huggingface_hub import hf_hub_download\r\rfrom transformers import is_torch_available\rfrom 
transformers.testing_utils import is_flaky, require_torch, slow, torch_device\r\rfrom ...test_configuration_common import ConfigTester\rfrom ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor\rfrom ...test_pipeline_mixin import PipelineTesterMixin\r\r\r_A\t\t\t\t\t\t\t= 1E-4\r\rif is_torch_available():\r import torch\r\r from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel\r from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder\r\r@require_torch\rclass \t\t\t\t\t\tlowercase_ :\r\r def __init__( self ,\t\t__UpperCamelCase ,\t\t__UpperCamelCase=1_6 ,\t\t__UpperCamelCase=1_3 ,\t\t__UpperCamelCase=7 ,\t\t__UpperCamelCase=1_4 ,\t\t__UpperCamelCase=1_0 ,\t\t__UpperCamelCase=1_9 ,\t\t__UpperCamelCase=5 ,\t\t__UpperCamelCase=4 ,\t\t__UpperCamelCase=True ,\t\t__UpperCamelCase=1_6 ,\t\t__UpperCamelCase=2 ,\t\t__UpperCamelCase=4 ,\t\t__UpperCamelCase=4 ,\t\t__UpperCamelCase=\"gelu\" ,\t\t__UpperCamelCase=0.1 ,\t\t__UpperCamelCase=0.1 ,\t\t__UpperCamelCase=[1, 2, 3, 4, 5] ,\t\t__UpperCamelCase=2_5 ,\t\t__UpperCamelCase=5 ,\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = d_model\r UpperCamelCase_\t\t\t\t\t\t\t = parent\r UpperCamelCase_\t\t\t\t\t\t\t = batch_size\r UpperCamelCase_\t\t\t\t\t\t\t = prediction_length\r UpperCamelCase_\t\t\t\t\t\t\t = context_length\r UpperCamelCase_\t\t\t\t\t\t\t = cardinality\r UpperCamelCase_\t\t\t\t\t\t\t = num_time_features\r UpperCamelCase_\t\t\t\t\t\t\t = lags_sequence\r UpperCamelCase_\t\t\t\t\t\t\t = embedding_dimension\r UpperCamelCase_\t\t\t\t\t\t\t = is_training\r UpperCamelCase_\t\t\t\t\t\t\t = hidden_size\r UpperCamelCase_\t\t\t\t\t\t\t = num_hidden_layers\r UpperCamelCase_\t\t\t\t\t\t\t = num_attention_heads\r UpperCamelCase_\t\t\t\t\t\t\t = intermediate_size\r UpperCamelCase_\t\t\t\t\t\t\t = hidden_act\r UpperCamelCase_\t\t\t\t\t\t\t = hidden_dropout_prob\r UpperCamelCase_\t\t\t\t\t\t\t = attention_probs_dropout_prob\r\r UpperCamelCase_\t\t\t\t\t\t\t = context_length\r UpperCamelCase_\t\t\t\t\t\t\t = prediction_length + label_length\r UpperCamelCase_\t\t\t\t\t\t\t = label_length\r\r UpperCamelCase_\t\t\t\t\t\t\t = moving_average\r UpperCamelCase_\t\t\t\t\t\t\t = autocorrelation_factor\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r return AutoformerConfig(\r d_model=self.d_model ,\t\tencoder_layers=self.num_hidden_layers ,\t\tdecoder_layers=self.num_hidden_layers ,\t\tencoder_attention_heads=self.num_attention_heads ,\t\tdecoder_attention_heads=self.num_attention_heads ,\t\tencoder_ffn_dim=self.intermediate_size ,\t\tdecoder_ffn_dim=self.intermediate_size ,\t\tdropout=self.hidden_dropout_prob ,\t\tattention_dropout=self.attention_probs_dropout_prob ,\t\tprediction_length=self.prediction_length ,\t\tcontext_length=self.context_length ,\t\tlabel_length=self.label_length ,\t\tlags_sequence=self.lags_sequence ,\t\tnum_time_features=self.num_time_features ,\t\tnum_static_categorical_features=1 ,\t\tcardinality=[self.cardinality] ,\t\tembedding_dimension=[self.embedding_dimension] ,\t\tmoving_average=self.moving_average ,\t\t)\r\r def lowerCamelCase_\t\t\t( self ,\t\t__UpperCamelCase ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = config.context_length + max(config.lags_sequence )\r\r UpperCamelCase_\t\t\t\t\t\t\t = ids_tensor([self.batch_size, 1] ,\t\tconfig.cardinality[0] )\r UpperCamelCase_\t\t\t\t\t\t\t = floats_tensor([self.batch_size, _past_length, 
config.num_time_features] )\r UpperCamelCase_\t\t\t\t\t\t\t = floats_tensor([self.batch_size, _past_length] )\r UpperCamelCase_\t\t\t\t\t\t\t = floats_tensor([self.batch_size, _past_length] ) > 0.5\r\r # decoder inputs\r UpperCamelCase_\t\t\t\t\t\t\t = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )\r UpperCamelCase_\t\t\t\t\t\t\t = floats_tensor([self.batch_size, config.prediction_length] )\r\r UpperCamelCase_\t\t\t\t\t\t\t = {\r \"\"\"past_values\"\"\": past_values,\r \"\"\"static_categorical_features\"\"\": static_categorical_features,\r \"\"\"past_time_features\"\"\": past_time_features,\r \"\"\"past_observed_mask\"\"\": past_observed_mask,\r \"\"\"future_time_features\"\"\": future_time_features,\r \"\"\"future_values\"\"\": future_values,\r }\r return inputs_dict\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = self.get_config()\r UpperCamelCase_\t\t\t\t\t\t\t = self.prepare_autoformer_inputs_dict(__UpperCamelCase )\r return config, inputs_dict\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = self.prepare_config_and_inputs()\r return config, inputs_dict\r\r def lowerCamelCase_\t\t\t( self ,\t\t__UpperCamelCase ,\t\t__UpperCamelCase ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = AutoformerModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()\r UpperCamelCase_\t\t\t\t\t\t\t = model(**__UpperCamelCase )\r\r UpperCamelCase_\t\t\t\t\t\t\t = outputs.encoder_last_hidden_state\r UpperCamelCase_\t\t\t\t\t\t\t = outputs.last_hidden_state\r\r with tempfile.TemporaryDirectory() as tmpdirname:\r UpperCamelCase_\t\t\t\t\t\t\t = model.get_encoder()\r encoder.save_pretrained(__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = AutoformerEncoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )\r\r UpperCamelCase_\t\t\t\t\t\t\t = model.create_network_inputs(**__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )\r\r UpperCamelCase_\t\t\t\t\t\t\t = torch.cat(\r (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) ,\t\tdim=-1 ,\t\t)\r UpperCamelCase_\t\t\t\t\t\t\t = encoder(inputs_embeds=__UpperCamelCase )[0]\r self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )\r\r UpperCamelCase_\t\t\t\t\t\t\t = (\r torch.mean(transformer_inputs[:, : config.context_length, ...] 
,\t\tdim=1 )\r .unsqueeze(1 )\r .repeat(1 ,\t\tconfig.prediction_length ,\t\t1 )\r )\r UpperCamelCase_\t\t\t\t\t\t\t = torch.zeros(\r [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] ,\t\tdevice=enc_input.device ,\t\t)\r\r UpperCamelCase_\t\t\t\t\t\t\t = torch.cat(\r (\r torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) ,\t\tdim=1 ),\r feature[:, config.context_length - config.label_length :, ...],\r ) ,\t\tdim=-1 ,\t\t)\r UpperCamelCase_\t\t\t\t\t\t\t = torch.cat(\r (\r torch.cat((trend_input[:, -config.label_length :, ...], mean) ,\t\tdim=1 ),\r feature[:, config.context_length - config.label_length :, ...],\r ) ,\t\tdim=-1 ,\t\t)\r\r with tempfile.TemporaryDirectory() as tmpdirname:\r UpperCamelCase_\t\t\t\t\t\t\t = model.get_decoder()\r decoder.save_pretrained(__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = AutoformerDecoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )\r\r UpperCamelCase_\t\t\t\t\t\t\t = decoder(\r trend=__UpperCamelCase ,\t\tinputs_embeds=__UpperCamelCase ,\t\tencoder_hidden_states=__UpperCamelCase ,\t\t)[0]\r\r self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )\r\r\r\r@require_torch\rclass \t\t\t\t\t\tlowercase_ ( _SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t,\t\t_SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t,\t\tunittest.TestCase\t\t\t):\r A__ :\t\t\tOptional[int]\t\t\t\t = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()\r A__ :\t\t\tOptional[Any]\t\t\t\t = (AutoformerForPrediction,) if is_torch_available() else ()\r A__ :\t\t\tOptional[int]\t\t\t\t = {\"\"\"feature-extraction\"\"\": AutoformerModel} if is_torch_available() else {}\r A__ :\t\t\tOptional[Any]\t\t\t\t = False\r A__ :\t\t\tOptional[Any]\t\t\t\t = False\r A__ :\t\t\tint\t\t\t\t = False\r A__ :\t\t\tList[str]\t\t\t\t = False\r A__ :\t\t\tList[Any]\t\t\t\t = False\r A__ :\t\t\tTuple\t\t\t\t = False\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = AutoformerModelTester(self )\r UpperCamelCase_\t\t\t\t\t\t\t = ConfigTester(self ,\t\tconfig_class=__UpperCamelCase ,\t\thas_text_modality=__UpperCamelCase )\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r self.config_tester.run_common_tests()\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = self.model_tester.prepare_config_and_inputs()\r for model_class in self.all_model_classes:\r UpperCamelCase_\t\t\t\t\t\t\t = model_class(__UpperCamelCase )\r\r with tempfile.TemporaryDirectory() as tmpdirname:\r model.save_pretrained(__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = model_class.from_pretrained(__UpperCamelCase ,\t\toutput_loading_info=__UpperCamelCase )\r self.assertEqual(info[\"\"\"missing_keys\"\"\"] ,\t\t[] )\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = self.model_tester.prepare_config_and_inputs_for_common()\r self.model_tester.check_encoder_decoder_model_standalone(*__UpperCamelCase )\r\r @unittest.skip(reason=\"\"\"Model has no tokens embeddings\"\"\" )\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r pass\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = inspect.signature(getattr(__UpperCamelCase ,\t\t\"\"\"forward\"\"\" ) )\r # The main input is the name of the argument after `self`\r 
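        # `inspect.signature` lists `self` first, so index 1 is the first real
        # argument of `AutoformerModel.forward` (`past_values`), which must match
        # the class-level `main_input_name` attribute checked below.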
UpperCamelCase_\t\t\t\t\t\t\t = list(model_signature.parameters.keys() )[1]\r self.assertEqual(AutoformerModel.main_input_name ,\t\t__UpperCamelCase )\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = self.model_tester.prepare_config_and_inputs_for_common()\r\r for model_class in self.all_model_classes:\r UpperCamelCase_\t\t\t\t\t\t\t = model_class(__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = inspect.signature(model.forward )\r # signature.parameters is an OrderedDict => so arg_names order is deterministic\r UpperCamelCase_\t\t\t\t\t\t\t = [*signature.parameters.keys()]\r\r UpperCamelCase_\t\t\t\t\t\t\t = [\r \"\"\"past_values\"\"\",\r \"\"\"past_time_features\"\"\",\r \"\"\"past_observed_mask\"\"\",\r \"\"\"static_categorical_features\"\"\",\r \"\"\"static_real_features\"\"\",\r \"\"\"future_values\"\"\",\r \"\"\"future_time_features\"\"\",\r ]\r\r if model.__class__.__name__ in [\"AutoformerForPrediction\"]:\r expected_arg_names.append(\"\"\"future_observed_mask\"\"\" )\r\r expected_arg_names.extend(\r [\r \"\"\"decoder_attention_mask\"\"\",\r \"\"\"head_mask\"\"\",\r \"\"\"decoder_head_mask\"\"\",\r \"\"\"cross_attn_head_mask\"\"\",\r \"\"\"encoder_outputs\"\"\",\r \"\"\"past_key_values\"\"\",\r \"\"\"output_hidden_states\"\"\",\r \"\"\"output_attentions\"\"\",\r \"\"\"use_cache\"\"\",\r \"\"\"return_dict\"\"\",\r ] )\r\r self.assertListEqual(arg_names[: len(__UpperCamelCase )] ,\t\t__UpperCamelCase )\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = self.model_tester.prepare_config_and_inputs_for_common()\r UpperCamelCase_\t\t\t\t\t\t\t = True\r\r UpperCamelCase_\t\t\t\t\t\t\t = getattr(self.model_tester ,\t\t\"\"\"seq_length\"\"\" ,\t\t__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = getattr(self.model_tester ,\t\t\"\"\"decoder_seq_length\"\"\" ,\t\t__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = getattr(self.model_tester ,\t\t\"\"\"encoder_seq_length\"\"\" ,\t\t__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = getattr(self.model_tester ,\t\t\"\"\"d_model\"\"\" ,\t\t__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = getattr(self.model_tester ,\t\t\"\"\"num_attention_heads\"\"\" ,\t\t__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = d_model // num_attention_heads\r\r for model_class in self.all_model_classes:\r UpperCamelCase_\t\t\t\t\t\t\t = True\r UpperCamelCase_\t\t\t\t\t\t\t = False\r UpperCamelCase_\t\t\t\t\t\t\t = True\r UpperCamelCase_\t\t\t\t\t\t\t = model_class(__UpperCamelCase )\r model.to(__UpperCamelCase )\r model.eval()\r with torch.no_grad():\r UpperCamelCase_\t\t\t\t\t\t\t = model(**self._prepare_for_class(__UpperCamelCase ,\t\t__UpperCamelCase ) )\r UpperCamelCase_\t\t\t\t\t\t\t = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions\r self.assertEqual(len(__UpperCamelCase ) ,\t\tself.model_tester.num_hidden_layers )\r\r # check that output_attentions also work using config\r del inputs_dict[\"output_attentions\"]\r UpperCamelCase_\t\t\t\t\t\t\t = True\r UpperCamelCase_\t\t\t\t\t\t\t = model_class(__UpperCamelCase )\r model.to(__UpperCamelCase )\r model.eval()\r with torch.no_grad():\r UpperCamelCase_\t\t\t\t\t\t\t = model(**self._prepare_for_class(__UpperCamelCase ,\t\t__UpperCamelCase ) )\r UpperCamelCase_\t\t\t\t\t\t\t = outputs.encoder_attentions\r self.assertEqual(len(__UpperCamelCase ) ,\t\tself.model_tester.num_hidden_layers )\r\r self.assertListEqual(\r list(attentions[0].shape[-3:] 
) ,\t\t[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,\t\t)\r UpperCamelCase_\t\t\t\t\t\t\t = len(__UpperCamelCase )\r\r UpperCamelCase_\t\t\t\t\t\t\t = 7\r\r if \"last_hidden_state\" in outputs:\r correct_outlen += 1\r\r if \"trend\" in outputs:\r correct_outlen += 1\r\r if \"past_key_values\" in outputs:\r correct_outlen += 1 # past_key_values have been returned\r\r if \"loss\" in outputs:\r correct_outlen += 1\r\r if \"params\" in outputs:\r correct_outlen += 1\r\r self.assertEqual(__UpperCamelCase ,\t\t__UpperCamelCase )\r\r # decoder attentions\r UpperCamelCase_\t\t\t\t\t\t\t = outputs.decoder_attentions\r self.assertIsInstance(__UpperCamelCase ,\t\t(list, tuple) )\r self.assertEqual(len(__UpperCamelCase ) ,\t\tself.model_tester.num_hidden_layers )\r self.assertListEqual(\r list(decoder_attentions[0].shape[-3:] ) ,\t\t[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,\t\t)\r\r # cross attentions\r UpperCamelCase_\t\t\t\t\t\t\t = outputs.cross_attentions\r self.assertIsInstance(__UpperCamelCase ,\t\t(list, tuple) )\r self.assertEqual(len(__UpperCamelCase ) ,\t\tself.model_tester.num_hidden_layers )\r self.assertListEqual(\r list(cross_attentions[0].shape[-3:] ) ,\t\t[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,\t\t)\r\r # Check attention is always last and order is fine\r UpperCamelCase_\t\t\t\t\t\t\t = True\r UpperCamelCase_\t\t\t\t\t\t\t = True\r UpperCamelCase_\t\t\t\t\t\t\t = model_class(__UpperCamelCase )\r model.to(__UpperCamelCase )\r model.eval()\r with torch.no_grad():\r UpperCamelCase_\t\t\t\t\t\t\t = model(**self._prepare_for_class(__UpperCamelCase ,\t\t__UpperCamelCase ) )\r\r self.assertEqual(out_len + 2 ,\t\tlen(__UpperCamelCase ) )\r\r UpperCamelCase_\t\t\t\t\t\t\t = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions\r\r self.assertEqual(len(__UpperCamelCase ) ,\t\tself.model_tester.num_hidden_layers )\r self.assertListEqual(\r list(self_attentions[0].shape[-3:] ) ,\t\t[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,\t\t)\r\r @is_flaky()\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r super().test_retain_grad_hidden_states_attentions()\r\r\r\r\rdef \t\t\tlowerCamelCase__\t\t\t( a__\t\t\t\t\t:\t\t\t\t\tUnion[str, Any]=\"train-batch.pt\" )\t\t\t\t\t\t->\t\tDict:\r UpperCamelCase_\t\t\t\t\t\t\t = hf_hub_download(repo_id=\"\"\"hf-internal-testing/tourism-monthly-batch\"\"\"\t\t, filename=A__\t\t, repo_type=\"\"\"dataset\"\"\" )\r UpperCamelCase_\t\t\t\t\t\t\t = torch.load(A__\t\t, map_location=A__ )\r return batch\r\r\r\r\r\r\r\r@require_torch\r@slow\rclass \t\t\t\t\t\tlowercase_ ( unittest.TestCase\t\t\t):\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = AutoformerModel.from_pretrained(\"\"\"huggingface/autoformer-tourism-monthly\"\"\" ).to(__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = prepare_batch()\r\r with torch.no_grad():\r UpperCamelCase_\t\t\t\t\t\t\t = model(\r past_values=batch[\"\"\"past_values\"\"\"] ,\t\tpast_time_features=batch[\"\"\"past_time_features\"\"\"] ,\t\tpast_observed_mask=batch[\"\"\"past_observed_mask\"\"\"] ,\t\tstatic_categorical_features=batch[\"\"\"static_categorical_features\"\"\"] ,\t\tfuture_values=batch[\"\"\"future_values\"\"\"] ,\t\tfuture_time_features=batch[\"\"\"future_time_features\"\"\"] ,\t\t)[0]\r\r UpperCamelCase_\t\t\t\t\t\t\t = torch.Size(\r (6_4, model.config.prediction_length + model.config.label_length, 
model.config.feature_size) )\r self.assertEqual(output.shape ,\t\t__UpperCamelCase )\r\r UpperCamelCase_\t\t\t\t\t\t\t = torch.tensor(\r [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] ,\t\tdevice=__UpperCamelCase )\r self.assertTrue(torch.allclose(output[0, :3, :3] ,\t\t__UpperCamelCase ,\t\tatol=__UpperCamelCase ) )\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = AutoformerForPrediction.from_pretrained(\"\"\"huggingface/autoformer-tourism-monthly\"\"\" ).to(__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = prepare_batch(\"\"\"val-batch.pt\"\"\" )\r with torch.no_grad():\r UpperCamelCase_\t\t\t\t\t\t\t = model(\r past_values=batch[\"\"\"past_values\"\"\"] ,\t\tpast_time_features=batch[\"\"\"past_time_features\"\"\"] ,\t\tpast_observed_mask=batch[\"\"\"past_observed_mask\"\"\"] ,\t\tstatic_categorical_features=batch[\"\"\"static_categorical_features\"\"\"] ,\t\t).encoder_last_hidden_state\r UpperCamelCase_\t\t\t\t\t\t\t = torch.Size((6_4, model.config.context_length, model.config.d_model) )\r self.assertEqual(output.shape ,\t\t__UpperCamelCase )\r\r UpperCamelCase_\t\t\t\t\t\t\t = torch.tensor(\r [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] ,\t\tdevice=__UpperCamelCase )\r self.assertTrue(torch.allclose(output[0, :3, :3] ,\t\t__UpperCamelCase ,\t\tatol=__UpperCamelCase ) )\r\r def lowerCamelCase_\t\t\t( self ):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r UpperCamelCase_\t\t\t\t\t\t\t = AutoformerForPrediction.from_pretrained(\"\"\"huggingface/autoformer-tourism-monthly\"\"\" ).to(__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = prepare_batch(\"\"\"val-batch.pt\"\"\" )\r with torch.no_grad():\r UpperCamelCase_\t\t\t\t\t\t\t = model.generate(\r static_categorical_features=batch[\"\"\"static_categorical_features\"\"\"] ,\t\tpast_time_features=batch[\"\"\"past_time_features\"\"\"] ,\t\tpast_values=batch[\"\"\"past_values\"\"\"] ,\t\tfuture_time_features=batch[\"\"\"future_time_features\"\"\"] ,\t\tpast_observed_mask=batch[\"\"\"past_observed_mask\"\"\"] ,\t\t)\r UpperCamelCase_\t\t\t\t\t\t\t = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )\r self.assertEqual(outputs.sequences.shape ,\t\t__UpperCamelCase )\r\r UpperCamelCase_\t\t\t\t\t\t\t = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] ,\t\tdevice=__UpperCamelCase )\r UpperCamelCase_\t\t\t\t\t\t\t = outputs.sequences.mean(dim=1 )\r self.assertTrue(torch.allclose(mean_prediction[0, -3:] ,\t\t__UpperCamelCase ,\t\trtol=1e-1 ) )\r\r\r\r"},"code_codestyle":{"kind":"number","value":351,"string":"351"},"style_context":{"kind":"string","value":"\rdef \t\t\tlowerCamelCase__\t\t\t( a__\t\t\t\t\t:\t\t\t\t\tList[Any] )\t\t\t\t\t\t->\t\tOptional[int]:\r UpperCamelCase_\t\t\t\t\t\t\t = len(a__ )\r while cur > 1:\r # Find the maximum number in arr\r UpperCamelCase_\t\t\t\t\t\t\t = arr.index(max(arr[0:cur] ) )\r # Reverse from 0 to mi\r UpperCamelCase_\t\t\t\t\t\t\t = arr[mi::-1] + arr[mi + 1 : len(a__ )]\r # Reverse whole list\r UpperCamelCase_\t\t\t\t\t\t\t = arr[cur - 1 :: -1] + arr[cur : len(a__ )]\r cur -= 1\r return arr\r\r\rif __name__ == \"__main__\":\r _A\t\t\t\t\t\t\t= input('''Enter numbers separated by a comma:\\n''').strip()\r _A\t\t\t\t\t\t\t= [int(item) for item in user_input.split(''',''')]\r 
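   # e.g. entering "3,1,5,2" prints [1, 2, 3, 5]: each pass flips the prefix
   # ending at the current maximum to the front, then flips the first `cur`
   # elements so that maximum settles at the end of the unsorted region.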
print(pancake_sort(unsorted))\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":261,"string":"261"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":752,"cells":{"code":{"kind":"string","value":"\r\r\r\r\r'''simple docstring'''\r\r\r\r\rimport math\r\r\r\r\r\rdef a_\t\t\t\t\t\t\t( __snake_case\t\t\t\t\t\t\t:\t\t\tint )\t\t\t\t\t\t\t->\t\t\t\t\t\tbool:\r\r\r\r \"\"\"simple docstring\"\"\"\r return math.sqrt(__snake_case ) * math.sqrt(__snake_case ) == num\r\r\r\r\r\rdef a_\t\t\t\t\t\t\t( __snake_case\t\t\t\t\t\t\t:\t\t\tint )\t\t\t\t\t\t\t->\t\t\t\t\t\tbool:\r\r\r\r \"\"\"simple docstring\"\"\"\r lowerCamelCase_\t\t\t\t\t\t\t=0\r lowerCamelCase_\t\t\t\t\t\t\t=n\r while left <= right:\r lowerCamelCase_\t\t\t\t\t\t\t=(left + right) // 2\r if mid**2 == n:\r return True\r elif mid**2 > n:\r lowerCamelCase_\t\t\t\t\t\t\t=mid - 1\r else:\r lowerCamelCase_\t\t\t\t\t\t\t=mid + 1\r return False\r\r\rif __name__ == \"__main__\":\r import doctest\r\r doctest.testmod()\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":75,"string":"75"},"style_context":{"kind":"string","value":"\n\n\n\n\nfrom typing import Dict, List, Optional\n\nfrom ...tokenization_utils import AddedToken, PreTrainedTokenizer\nfrom ...utils import logging\n\n\n_A =\t\t\tlogging.get_logger(__name__)\n\n\n_A =\t\t\t{\n '''nielsr/canine-s''': 2_048,\n}\n\n# Unicode defines 1,114,112 total “codepoints”\n_A =\t\t\t1_114_112\n\n# Below: Constants defining canonical codepoints for special, pseudo-characters.\n# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py\n_A =\t\t\t0\n\n_A =\t\t\t0xe0_00\n_A =\t\t\t0xe0_01\n_A =\t\t\t0xe0_02\n_A =\t\t\t0xe0_03\n_A =\t\t\t0xe0_04\n\n# Maps special codepoints to human-readable names.\n_A =\t\t\t{\n # Special symbols are represented using codepoints values that are valid,\n # but designated as \"Private Use\", meaning that they will never be assigned\n # characters by the Unicode Consortium, and are thus safe for use here.\n #\n # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. 
They are explicitly\n # excluded and should fail with a hard error.\n CLS: \"[CLS]\",\n SEP: \"[SEP]\",\n BOS: \"[BOS]\",\n MASK: \"[MASK]\",\n PAD: \"[PAD]\",\n RESERVED: \"[RESERVED]\",\n}\n\n# Maps special codepoint human-readable names to their codepoint values.\n_A =\t\t\t{name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}\n\n\nclass A\t\t( __UpperCAmelCase ):\n\t\t__snake_case\t\t\t\t\t\t\t\t\t= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n\n\n\t\tdef __init__( self,\tUpperCamelCase__=chr(UpperCamelCase__ ),\tUpperCamelCase__=chr(UpperCamelCase__ ),\tUpperCamelCase__=chr(UpperCamelCase__ ),\tUpperCamelCase__=chr(UpperCamelCase__ ),\tUpperCamelCase__=chr(UpperCamelCase__ ),\tUpperCamelCase__=chr(UpperCamelCase__ ),\tUpperCamelCase__=False,\tUpperCamelCase__=2048,\t**UpperCamelCase__,\t):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= AddedToken(UpperCamelCase__,\tlstrip=UpperCamelCase__,\trstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__,\tUpperCamelCase__ ) else bos_token\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= AddedToken(UpperCamelCase__,\tlstrip=UpperCamelCase__,\trstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__,\tUpperCamelCase__ ) else eos_token\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= AddedToken(UpperCamelCase__,\tlstrip=UpperCamelCase__,\trstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__,\tUpperCamelCase__ ) else sep_token\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= AddedToken(UpperCamelCase__,\tlstrip=UpperCamelCase__,\trstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__,\tUpperCamelCase__ ) else cls_token\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= AddedToken(UpperCamelCase__,\tlstrip=UpperCamelCase__,\trstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__,\tUpperCamelCase__ ) else pad_token\n\n\t\t\t\t\t\t\t\t# Mask token behave like a normal word, i.e. 
include the space before it\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= AddedToken(UpperCamelCase__,\tlstrip=UpperCamelCase__,\trstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__,\tUpperCamelCase__ ) else mask_token\n\n\t\t\t\t\t\t\t\tsuper().__init__(\n\t\t\t\t\t\t\t\t bos_token=UpperCamelCase__,\teos_token=UpperCamelCase__,\tsep_token=UpperCamelCase__,\tcls_token=UpperCamelCase__,\tpad_token=UpperCamelCase__,\tmask_token=UpperCamelCase__,\tadd_prefix_space=UpperCamelCase__,\tmodel_max_length=UpperCamelCase__,\t**UpperCamelCase__,\t)\n\n\t\t\t\t\t\t\t\t# Creates a mapping for looking up the IDs of special symbols.\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= {}\n\t\t\t\t\t\t\t\tfor codepoint, name in SPECIAL_CODEPOINTS.items():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= codepoint\n\n\t\t\t\t\t\t\t\t# Creates a mapping for looking up the string forms of special symbol IDs.\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= {\n\t\t\t\t\t\t\t\t codepoint: name for name, codepoint in self._special_codepoints.items()\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= UNICODE_VOCAB_SIZE\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= len(self._special_codepoints )\n\n\n\n\t\t@property\n\t\tdef SCREAMING_SNAKE_CASE__\t\t\t( self ):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\treturn self._unicode_vocab_size\n\n\n\n\t\tdef SCREAMING_SNAKE_CASE__\t\t\t( self,\tUpperCamelCase__ ):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\treturn list(UpperCamelCase__ )\n\n\n\n\t\tdef SCREAMING_SNAKE_CASE__\t\t\t( self,\tUpperCamelCase__ ):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn ord(UpperCamelCase__ )\n\t\t\t\t\t\t\t\texcept TypeError:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(f\"invalid token: '{token}'\" )\n\n\n\n\t\tdef SCREAMING_SNAKE_CASE__\t\t\t( self,\tUpperCamelCase__ ):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif index in SPECIAL_CODEPOINTS:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn SPECIAL_CODEPOINTS[index]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn chr(UpperCamelCase__ )\n\t\t\t\t\t\t\t\texcept TypeError:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(f\"invalid id: {index}\" )\n\n\n\n\t\tdef SCREAMING_SNAKE_CASE__\t\t\t( self,\tUpperCamelCase__ ):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\treturn \"\".join(UpperCamelCase__ )\n\n\n\n\t\tdef SCREAMING_SNAKE_CASE__\t\t\t( self,\tUpperCamelCase__,\tUpperCamelCase__ = None ):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= [self.sep_token_id]\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= [self.cls_token_id]\n\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= cls + token_ids_a + sep\n\t\t\t\t\t\t\t\tif token_ids_a is not None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult += token_ids_a + sep\n\t\t\t\t\t\t\t\treturn result\n\n\n\n\t\tdef SCREAMING_SNAKE_CASE__\t\t\t( self,\tUpperCamelCase__,\tUpperCamelCase__ = None,\tUpperCamelCase__ = False ):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\tif already_has_special_tokens:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn super().get_special_tokens_mask(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t token_ids_a=UpperCamelCase__,\ttoken_ids_a=UpperCamelCase__,\talready_has_special_tokens=UpperCamelCase__ )\n\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= [1] + ([0] * len(UpperCamelCase__ )) + 
[1]\n\t\t\t\t\t\t\t\tif token_ids_a is not None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult += ([0] * len(UpperCamelCase__ )) + [1]\n\t\t\t\t\t\t\t\treturn result\n\n\n\n\t\tdef SCREAMING_SNAKE_CASE__\t\t\t( self,\tUpperCamelCase__,\tUpperCamelCase__ = None ):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= [self.sep_token_id]\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= [self.cls_token_id]\n\n\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= len(cls + token_ids_a + sep ) * [0]\n\t\t\t\t\t\t\t\tif token_ids_a is not None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult += len(token_ids_a + sep ) * [1]\n\t\t\t\t\t\t\t\treturn result\n\n\n\n\n\n\n\n\t\tdef SCREAMING_SNAKE_CASE__\t\t\t( self,\tUpperCamelCase__,\tUpperCamelCase__ = None ):\n\n\n\n\n\n\n\n\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\treturn ()\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":278,"string":"278"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":753,"cells":{"code":{"kind":"string","value":"\r\rfrom typing import Optional\r\rimport numpy as np\rimport torch\rfrom torch import nn\rfrom transformers import GPTaConfig, GPTaLMHeadModel\rfrom transformers.modeling_utils import ModuleUtilsMixin\r\rfrom ...configuration_utils import ConfigMixin, register_to_config\rfrom ...models import ModelMixin\r\r\r\r\r\r\rclass \tUpperCAmelCase__\t\t(\t_a\t\t\t\t\t\t,\t\t\t\t\t_a\t\t\t\t\t\t,\t\t\t\t\t_a\t):\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r a\t\t\t\t\t\t\t\t\t= [r\"h\\.\\d+\\.attn\\.bias\", r\"h\\.\\d+\\.attn\\.masked_bias\"]\r\r\r\r\r\r\r @register_to_config\r def __init__(\t\t\t\t\t\t\tself\t\t\t\t\t\t:\t\tOptional[int] , __lowerCamelCase\t\t\t\t\t\t:\t\tint , __lowerCamelCase\t\t\t\t\t\t:\t\tint , __lowerCamelCase\t\t\t\t\t\t:\t\tOptional[int] = None , __lowerCamelCase\t\t\t\t\t\t:\t\tint = 5_0257 , __lowerCamelCase\t\t\t\t\t\t:\t\tint = 1024 , __lowerCamelCase\t\t\t\t\t\t:\t\tint = 768 , __lowerCamelCase\t\t\t\t\t\t:\t\tint = 12 , __lowerCamelCase\t\t\t\t\t\t:\t\tint = 12 , __lowerCamelCase\t\t\t\t\t\t:\t\tOptional[int] = None , __lowerCamelCase\t\t\t\t\t\t:\t\tstr = \"gelu_new\" , __lowerCamelCase\t\t\t\t\t\t:\t\tfloat = 0.1 , __lowerCamelCase\t\t\t\t\t\t:\t\tfloat = 0.1 , __lowerCamelCase\t\t\t\t\t\t:\t\tfloat = 0.1 , __lowerCamelCase\t\t\t\t\t\t:\t\tfloat = 1e-5 , __lowerCamelCase\t\t\t\t\t\t:\t\tfloat = 0.02 , __lowerCamelCase\t\t\t\t\t\t:\t\tbool = True , __lowerCamelCase\t\t\t\t\t\t:\t\tbool = True , __lowerCamelCase\t\t\t\t\t\t:\t\tbool = False , __lowerCamelCase\t\t\t\t\t\t:\t\tbool = False , )\t\t->\t\t\t\t\t\t\tTuple:\r super().__init__()\r\r SCREAMING_SNAKE_CASE__ = prefix_length\r\r if prefix_inner_dim != n_embd and prefix_hidden_dim is None:\r raise ValueError(\r f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''\r f''' `n_embd`: {n_embd} are not equal.''' )\r\r SCREAMING_SNAKE_CASE__ = prefix_inner_dim\r SCREAMING_SNAKE_CASE__ = prefix_hidden_dim\r\r SCREAMING_SNAKE_CASE__ = (\r nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )\r if self.prefix_hidden_dim is not None\r else nn.Identity()\r )\r SCREAMING_SNAKE_CASE__ = (\r nn.Linear(self.prefix_hidden_dim , snake_case_ ) if self.prefix_hidden_dim is not None else nn.Identity()\r )\r\r SCREAMING_SNAKE_CASE__ = GPTaConfig(\r vocab_size=snake_case_ , n_positions=snake_case_ , n_embd=snake_case_ , n_layer=snake_case_ , n_head=snake_case_ , n_inner=snake_case_ , activation_function=snake_case_ , resid_pdrop=snake_case_ , 
embd_pdrop=snake_case_ , attn_pdrop=snake_case_ , layer_norm_epsilon=snake_case_ , initializer_range=snake_case_ , scale_attn_weights=snake_case_ , use_cache=snake_case_ , scale_attn_by_inverse_layer_idx=snake_case_ , reorder_and_upcast_attn=snake_case_ , )\r SCREAMING_SNAKE_CASE__ = GPTaLMHeadModel(snake_case_ )\r\r\r\r\r\r\r def \t\t\t\tlowercase_\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t\t:\t\tOptional[int] , __lowerCamelCase\t\t\t\t\t\t:\t\ttorch.Tensor , __lowerCamelCase\t\t\t\t\t\t:\t\ttorch.Tensor , __lowerCamelCase\t\t\t\t\t\t:\t\tOptional[torch.Tensor] = None , __lowerCamelCase\t\t\t\t\t\t:\t\tOptional[torch.Tensor] = None , )\t\t->\t\t\t\t\t\t\tDict:\r SCREAMING_SNAKE_CASE__ = self.transformer.transformer.wte(snake_case_ )\r SCREAMING_SNAKE_CASE__ = self.encode_prefix(snake_case_ )\r SCREAMING_SNAKE_CASE__ = self.decode_prefix(snake_case_ )\r SCREAMING_SNAKE_CASE__ = torch.cat((prefix_embeds, embedding_text) , dim=1 )\r\r if labels is not None:\r SCREAMING_SNAKE_CASE__ = self.get_dummy_token(input_ids.shape[0] , input_ids.device )\r SCREAMING_SNAKE_CASE__ = torch.cat((dummy_token, input_ids) , dim=1 )\r SCREAMING_SNAKE_CASE__ = self.transformer(inputs_embeds=snake_case_ , labels=snake_case_ , attention_mask=snake_case_ )\r if self.prefix_hidden_dim is not None:\r return out, hidden\r else:\r return out\r\r\r\r\r\r\r def \t\t\t\tlowercase_\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t\t:\t\tOptional[Any] , __lowerCamelCase\t\t\t\t\t\t:\t\tint , __lowerCamelCase\t\t\t\t\t\t:\t\ttorch.device )\t\t->\t\t\t\t\t\t\tint:\r return torch.zeros(snake_case_ , self.prefix_length , dtype=torch.intaa , device=snake_case_ )\r\r\r\r\r\r\r def \t\t\t\tlowercase_\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t\t:\t\tTuple , __lowerCamelCase\t\t\t\t\t\t:\t\tTuple )\t\t->\t\t\t\t\t\t\tint:\r return self.encode_prefix(snake_case_ )\r\r\r\r\r\r\r @torch.no_grad()\r def \t\t\t\tlowercase_\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t\t:\t\tList[str] , __lowerCamelCase\t\t\t\t\t\t:\t\tOptional[int] , __lowerCamelCase\t\t\t\t\t\t:\t\tOptional[Any] , __lowerCamelCase\t\t\t\t\t\t:\t\tList[str] )\t\t->\t\t\t\t\t\t\tTuple:\r SCREAMING_SNAKE_CASE__ = torch.split(snake_case_ , 1 , dim=0 )\r SCREAMING_SNAKE_CASE__ = []\r SCREAMING_SNAKE_CASE__ = []\r for feature in features:\r SCREAMING_SNAKE_CASE__ = self.decode_prefix(feature.to(snake_case_ ) ) # back to the clip feature\r # Only support beam search for now\r SCREAMING_SNAKE_CASE__ = self.generate_beam(\r input_embeds=snake_case_ , device=snake_case_ , eos_token_id=snake_case_ )\r generated_tokens.append(output_tokens[0] )\r generated_seq_lengths.append(seq_lengths[0] )\r SCREAMING_SNAKE_CASE__ = torch.stack(snake_case_ )\r SCREAMING_SNAKE_CASE__ = torch.stack(snake_case_ )\r return generated_tokens, generated_seq_lengths\r\r\r\r\r\r\r\r @torch.no_grad()\r def \t\t\t\tlowercase_\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t\t:\t\tOptional[int] , __lowerCamelCase\t\t\t\t\t\t:\t\tOptional[Any]=None , __lowerCamelCase\t\t\t\t\t\t:\t\tOptional[int]=None , __lowerCamelCase\t\t\t\t\t\t:\t\tUnion[str, Any]=None , __lowerCamelCase\t\t\t\t\t\t:\t\tint = 5 , __lowerCamelCase\t\t\t\t\t\t:\t\tint = 67 , __lowerCamelCase\t\t\t\t\t\t:\t\tfloat = 1.0 , __lowerCamelCase\t\t\t\t\t\t:\t\tOptional[int] = None , )\t\t->\t\t\t\t\t\t\tAny:\r SCREAMING_SNAKE_CASE__ = eos_token_id\r SCREAMING_SNAKE_CASE__ = None\r SCREAMING_SNAKE_CASE__ = None\r SCREAMING_SNAKE_CASE__ = torch.ones(snake_case_ , device=snake_case_ , dtype=torch.int )\r SCREAMING_SNAKE_CASE__ = torch.zeros(snake_case_ , device=snake_case_ , dtype=torch.bool )\r\r if input_embeds 
is not None:\r SCREAMING_SNAKE_CASE__ = input_embeds\r else:\r SCREAMING_SNAKE_CASE__ = self.transformer.transformer.wte(snake_case_ )\r\r for i in range(snake_case_ ):\r SCREAMING_SNAKE_CASE__ = self.transformer(inputs_embeds=snake_case_ )\r SCREAMING_SNAKE_CASE__ = outputs.logits\r SCREAMING_SNAKE_CASE__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)\r SCREAMING_SNAKE_CASE__ = logits.softmax(-1 ).log()\r\r if scores is None:\r SCREAMING_SNAKE_CASE__ = logits.topk(snake_case_ , -1 )\r SCREAMING_SNAKE_CASE__ = generated.expand(snake_case_ , *generated.shape[1:] )\r SCREAMING_SNAKE_CASE__ = next_tokens.permute(1 , 0 ), scores.squeeze(0 )\r if tokens is None:\r SCREAMING_SNAKE_CASE__ = next_tokens\r else:\r SCREAMING_SNAKE_CASE__ = tokens.expand(snake_case_ , *tokens.shape[1:] )\r SCREAMING_SNAKE_CASE__ = torch.cat((tokens, next_tokens) , dim=1 )\r else:\r SCREAMING_SNAKE_CASE__ = -float(np.inf )\r SCREAMING_SNAKE_CASE__ = 0\r SCREAMING_SNAKE_CASE__ = scores[:, None] + logits\r seq_lengths[~is_stopped] += 1\r SCREAMING_SNAKE_CASE__ = scores_sum / seq_lengths[:, None]\r SCREAMING_SNAKE_CASE__ = scores_sum_average.view(-1 ).topk(snake_case_ , -1 )\r SCREAMING_SNAKE_CASE__ = next_tokens // scores_sum.shape[1]\r SCREAMING_SNAKE_CASE__ = seq_lengths[next_tokens_source]\r SCREAMING_SNAKE_CASE__ = next_tokens % scores_sum.shape[1]\r SCREAMING_SNAKE_CASE__ = next_tokens.unsqueeze(1 )\r SCREAMING_SNAKE_CASE__ = tokens[next_tokens_source]\r SCREAMING_SNAKE_CASE__ = torch.cat((tokens, next_tokens) , dim=1 )\r SCREAMING_SNAKE_CASE__ = generated[next_tokens_source]\r SCREAMING_SNAKE_CASE__ = scores_sum_average * seq_lengths\r SCREAMING_SNAKE_CASE__ = is_stopped[next_tokens_source]\r\r SCREAMING_SNAKE_CASE__ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )\r SCREAMING_SNAKE_CASE__ = torch.cat((generated, next_token_embed) , dim=1 )\r SCREAMING_SNAKE_CASE__ = is_stopped + next_tokens.eq(snake_case_ ).squeeze()\r if is_stopped.all():\r break\r\r SCREAMING_SNAKE_CASE__ = scores / seq_lengths\r SCREAMING_SNAKE_CASE__ = scores.argsort(descending=snake_case_ )\r # tokens tensors are already padded to max_seq_length\r SCREAMING_SNAKE_CASE__ = [tokens[i] for i in order]\r SCREAMING_SNAKE_CASE__ = torch.stack(snake_case_ , dim=0 )\r SCREAMING_SNAKE_CASE__ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )\r return output_texts, seq_lengths\r\r\r"},"code_codestyle":{"kind":"number","value":371,"string":"371"},"style_context":{"kind":"string","value":"\r\rimport importlib\rimport json\rimport os\rfrom collections import OrderedDict\rfrom typing import Dict, Optional, Union\r\r# Build the list of all image processors\rfrom ...configuration_utils import PretrainedConfig\rfrom ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code\rfrom ...image_processing_utils import ImageProcessingMixin\rfrom ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging\rfrom .auto_factory import _LazyAutoMapping\rfrom .configuration_auto import (\r CONFIG_MAPPING_NAMES,\r AutoConfig,\r model_type_to_module_name,\r replace_list_option_in_docstrings,\r)\r\r\r_SCREAMING_SNAKE_CASE\t\t\t\t\t: List[str] = logging.get_logger(__name__)\r\r_SCREAMING_SNAKE_CASE\t\t\t\t\t: Tuple = OrderedDict(\r [\r ('''align''', '''EfficientNetImageProcessor'''),\r ('''beit''', '''BeitImageProcessor'''),\r ('''bit''', '''BitImageProcessor'''),\r ('''blip''', '''BlipImageProcessor'''),\r ('''blip-2''', 
# ---- next snippet: AutoImageProcessor (transformers) ----

import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the image processor configuration from a pretrained model checkpoint."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor
        # config and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )
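# ---- usage sketch (added for illustration, not part of the module above) ----
# A minimal example of resolving an image processor through the AutoImageProcessor
# class above. The checkpoint name is an assumption: any Hub repo that ships an image
# processor (or legacy feature extractor) config should resolve the same way.
from PIL import Image

from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")  # hypothetical checkpoint choice
inputs = image_processor(images=Image.new("RGB", (224, 224)), return_tensors="pt")
print(inputs["pixel_values"].shape)  # ViT processors typically yield (1, 3, 224, 224)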
    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)


# ---- next snippet: tests for file_transfer.send_file ----

from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
# ---- next snippet: documentation table-of-contents checker ----

import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean one section of the doc table of contents: deduplicate entries and sort them alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicated keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
# ---- next snippet: ROUGE calculation tests ----

from collections import defaultdict
from pathlib import Path

import pandas as pd
from rouge_cli import calculate_rouge_path

from utils import calculate_rouge

PRED = [
    'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
    ' the final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
    ' depression" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

TGT = [
    'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
    ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]


def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says . "it is'
        ' a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
        ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas'
        " Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)


# ---- next snippet: OpenAI GPT configuration (transformers) ----

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
# ---- next snippet: environment information dump script ----

# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow info/warning logs

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)


# ---- next snippet: Koch snowflake ----

from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch snowflake construction `steps` times to the initial vector list."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment by four segments, with a 60-degree spike in the middle."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
# ---- next snippet: InstructBLIP configuration (transformers) ----

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# ---- next snippet: single-qubit measurement with Qiskit ----

import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Build a one-qubit circuit, measure it, and return the measurement counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
# ---- next snippet: EfficientFormer checkpoint conversion script ----

import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    """Map an original checkpoint key to the corresponding transformers parameter name."""
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
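# ---- usage sketch (added for illustration) ----
# Example invocation of the conversion script above. The script filename and all paths
# are assumptions, but the flags match the argparse definition directly above:
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path ./efficientformer_l1.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1-converted \
#       --no-push_to_hub
#
# Dropping --no-push_to_hub keeps the default (push_to_hub=True) and uploads both the
# converted model and the image processor after the logits check passes.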
# ---- next snippet: text2text-generation pipeline tests (transformers) ----

import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY

if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
first loop)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t\t= device_data * np.dot(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t column_reshape(_A ) - column_reshape(_A )\t\t\t,\t\t\t\t\t\t\t(column_reshape(_A ) - column_reshape(_A )).T\t\t\t,\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\treturn covariance_sum / features.shape[1]\ndef UpperCamelCase\t\t\t\t\t\t( _A\t\t\t:\t\t\t\tnp.ndarray\t\t\t,\t\t\t\t\t\t\t_A\t\t\t:\t\t\t\tint )-> np.ndarray:\n\n\n\n\n\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\n\n\t\t\t\t\t\tif features.any():\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t\t= features.mean(1 )\n\t\t\t\t\t\t\t\t\t\t\t\t# Center the dataset\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t\t= features - np.reshape(_A\t\t\t,\t\t\t\t\t\t\t(data_mean.size, 1) )\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t\t= np.dot(_A\t\t\t,\t\t\t\t\t\t\tcentered_data.T ) / features.shape[1]\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t, A__\t\t\t\t\t\t\t= np.linalg.eigh(_A )\n\t\t\t\t\t\t\t\t\t\t\t\t# Take all the columns in the reverse order (-1), and then takes only the first\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t\t= eigenvectors[:, ::-1][:, 0:dimensions]\n\t\t\t\t\t\t\t\t\t\t\t\t# Project the database on the new space\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t\t= np.dot(filtered_eigenvectors.T\t\t\t,\t\t\t\t\t\t\t_A )\n\t\t\t\t\t\t\t\t\t\t\t\tlogging.info(\"Principal Component Analysis computed\" )\n\n\t\t\t\t\t\t\t\t\t\t\t\treturn projected_data\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tlogging.basicConfig(level=logging.ERROR\t\t\t,\t\t\t\t\t\t\tformat=\"%(message)s\"\t\t\t,\t\t\t\t\t\t\tforce=_A )\n\t\t\t\t\t\t\t\t\t\t\t\tlogging.error(\"Dataset empty\" )\n\t\t\t\t\t\t\t\t\t\t\t\traise AssertionError\ndef UpperCamelCase\t\t\t\t\t\t( _A\t\t\t:\t\t\t\tnp.ndarray\t\t\t,\t\t\t\t\t\t\t_A\t\t\t:\t\t\t\tnp.ndarray\t\t\t,\t\t\t\t\t\t\t_A\t\t\t:\t\t\t\tint\t\t\t,\t\t\t\t\t\t\t_A\t\t\t:\t\t\t\tint )-> np.ndarray:\n\n\n\n\n\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\n\n\t\t\t\t\t\tassert classes > dimensions\n\n\t\t\t\t\t\t# Check if features have been already loaded\n\t\t\t\t\t\tif features.any:\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t, A__\t\t\t\t\t\t\t= eigh(\n\t\t\t\t\t\t\t\t\t\t\t\t covariance_between_classes(_A\t\t\t,\t\t\t\t\t\t\t_A\t\t\t,\t\t\t\t\t\t\t_A )\t\t\t,\t\t\t\t\t\t\tcovariance_within_classes(_A\t\t\t,\t\t\t\t\t\t\t_A\t\t\t,\t\t\t\t\t\t\t_A )\t\t\t,\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t\t= eigenvectors[:, ::-1][:, :dimensions]\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t, A__\t\t\t\t\t\t, A__\t\t\t\t\t\t\t= np.linalg.svd(_A )\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t\t= svd_matrix[:, 0:dimensions]\n\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t\t\t= np.dot(filtered_svd_matrix.T\t\t\t,\t\t\t\t\t\t\t_A )\n\t\t\t\t\t\t\t\t\t\t\t\tlogging.info(\"Linear Discriminant Analysis computed\" )\n\n\t\t\t\t\t\t\t\t\t\t\t\treturn projected_data\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tlogging.basicConfig(level=logging.ERROR\t\t\t,\t\t\t\t\t\t\tformat=\"%(message)s\"\t\t\t,\t\t\t\t\t\t\tforce=_A )\n\t\t\t\t\t\t\t\t\t\t\t\tlogging.error(\"Dataset empty\" )\n\t\t\t\t\t\t\t\t\t\t\t\traise AssertionError\ndef UpperCamelCase\t\t\t\t\t\t( )-> None:\n\n\n\n\n\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\n\n\t\t\t\t\t\tA__\t\t\t\t\t\t\t= np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )\n\t\t\t\t\t\tA__\t\t\t\t\t\t\t= np.array([0, 0, 0, 1, 1] )\n\t\t\t\t\t\tA__\t\t\t\t\t\t\t= 2\n\t\t\t\t\t\tA__\t\t\t\t\t\t\t= 2\n\n\t\t\t\t\t\t# Assert that the function raises an AssertionError if dimensions > classes\n\t\t\t\t\t\twith 
"""Tests for the `FileLock` helper used by `datasets`."""

import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        _start = time.time()
        with pytest.raises(Timeout):
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # The lock file name is shortened to a legal length
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
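
# Usage sketch (illustrative, outside the test suite): the same lock-file
# pattern the tests exercise, guarding a critical section across processes.
def _demo_filelock(path: str = "demo.lock") -> None:
    lock = FileLock(path)
    with lock.acquire(timeout=5):
        # Only one process holding `path` gets here at a time.
        print("lock acquired")
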
"""A naive greedy heuristic for a knapsack-style menu selection problem."""


class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Take items in decreasing order of key_func while they fit the budget
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Doctest placeholder; see the usage sketch below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
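
# Usage sketch (illustrative): pick the best value-per-weight items that fit
# a 25-unit budget.
def _demo_greedy() -> None:
    food = ["Burger", "Pizza", "Coca Cola", "Rice"]
    value = [80, 100, 60, 70]
    weight = [40, 10, 20, 70]
    menu = build_menu(food, value, weight)
    chosen, total_value = greedy(menu, 25, Things.value_weight)
    print(chosen, total_value)  # [Things(Pizza, 100, 10)] 100.0
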
"""Tests for the restricted Python interpreter used by `transformers` tools."""

import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
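
# Usage sketch (illustrative): calling the restricted interpreter directly.
# Only the tools passed in the second argument are callable from the code.
def _demo_evaluate() -> None:
    state = {"x": 3}
    result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
    print(result, state)  # 5 {'x': 3, 'y': 5}
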
"""An `accelerate` example that tracks peak GPU memory while fine-tuning on MRPC."""

import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager that reports CUDA memory allocated inside its block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
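
# Usage sketch (illustrative; the script filename is a placeholder): the
# example above is meant to be launched through the accelerate CLI, e.g.
#
#     accelerate launch peak_memory_tracking.py --num_epochs 1 --peak_memory_upper_bound 4000
#
# `peak_memory_utilization.json` is then written to --output_dir by the main
# process.
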
"""Tokenizer for the ESM-2 protein language models."""

import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        # Protein sequences are whitespace-separated residue tokens
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
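
# Usage sketch (illustrative; requires network access to the Hub): round-trip
# a protein sequence through the tokenizer defined above.
#
#     tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     ids = tokenizer("MKTAYIAKQR")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))
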
"""Interpolation search, iterative and recursive, over an ascending list."""


def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67

    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
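
# Usage sketch (illustrative): the recursive variant needs explicit bounds.
def _demo_interpolation_search() -> None:
    data = [10, 30, 40, 45, 50, 66, 77, 93]
    print(interpolation_search(data, 45))  # 3
    print(interpolation_search_by_recursion(data, 45, 0, len(data) - 1))  # 3
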
"""Lazy import structure for the GIT model (transformers-style `__init__.py`)."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


"""Megatron-BERT model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
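
# Usage sketch (illustrative): a smaller-than-default configuration.
#
#     config = MegatronBertConfig(hidden_size=512, num_hidden_layers=8, num_attention_heads=8)
#     print(config.model_type, config.vocab_size)  # megatron-bert 29056
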
parent\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = batch_size\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = seq_length\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = is_training\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = use_input_mask\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = use_token_type_ids\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = use_labels\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = vocab_size\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = hidden_size\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = num_hidden_layers\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = num_attention_heads\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = intermediate_size\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = hidden_act\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = hidden_dropout_prob\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = attention_probs_dropout_prob\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = max_position_embeddings\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = type_vocab_size\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = type_sequence_label_size\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = initializer_range\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = num_labels\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = num_choices\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = scope\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor([self.batch_size, self.seq_length]\t\t\t\t\t\t, self.vocab_size )\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = None\r\n\t\t\t\t\t\t\t\t\t\tif self.use_input_mask:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case = random_attention_mask([self.batch_size, self.seq_length] )\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = None\r\n\t\t\t\t\t\t\t\t\t\tif self.use_token_type_ids:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor([self.batch_size, self.seq_length]\t\t\t\t\t\t, self.type_vocab_size )\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = None\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = None\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = None\r\n\t\t\t\t\t\t\t\t\t\tif self.use_labels:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor([self.batch_size]\t\t\t\t\t\t, self.type_sequence_label_size )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor([self.batch_size, self.seq_length]\t\t\t\t\t\t, self.num_labels )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor([self.batch_size]\t\t\t\t\t\t, self.num_choices )\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = self.get_config()\r\n\r\n\t\t\t\t\t\t\t\t\t\treturn config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\treturn BioGptConfig(\r\n\t\t\t\t\t\t\t\t\t\t vocab_size=self.vocab_size\t\t\t\t\t\t, hidden_size=self.hidden_size\t\t\t\t\t\t, num_hidden_layers=self.num_hidden_layers\t\t\t\t\t\t, num_attention_heads=self.num_attention_heads\t\t\t\t\t\t, intermediate_size=self.intermediate_size\t\t\t\t\t\t, hidden_act=self.hidden_act\t\t\t\t\t\t, hidden_dropout_prob=self.hidden_dropout_prob\t\t\t\t\t\t, attention_probs_dropout_prob=self.attention_probs_dropout_prob\t\t\t\t\t\t, max_position_embeddings=self.max_position_embeddings\t\t\t\t\t\t, type_vocab_size=self.type_vocab_size\t\t\t\t\t\t, is_decoder=lowerCAmelCase\t\t\t\t\t\t, initializer_range=self.initializer_range\t\t\t\t\t\t, )\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, 
lowerCAmelCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = BioGptModel(config=lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tmodel.to(lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model(lowerCAmelCase\t\t\t\t\t\t, attention_mask=lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model(lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tself.parent.assertEqual(result.last_hidden_state.shape\t\t\t\t\t\t, (self.batch_size, self.seq_length, self.hidden_size) )\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = BioGptForCausalLM(config=lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tmodel.to(lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model(lowerCAmelCase\t\t\t\t\t\t, attention_mask=lowerCAmelCase\t\t\t\t\t\t, token_type_ids=lowerCAmelCase\t\t\t\t\t\t, labels=lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tself.parent.assertEqual(result.logits.shape\t\t\t\t\t\t, (self.batch_size, self.seq_length, self.vocab_size) )\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, *lowerCAmelCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = BioGptModel(config=lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tmodel.to(lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tmodel.eval()\r\n\r\n\t\t\t\t\t\t\t\t\t\t# create attention mask\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = torch.ones(input_ids.shape\t\t\t\t\t\t, dtype=torch.long\t\t\t\t\t\t, device=lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = self.seq_length // 2\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = 0\r\n\r\n\t\t\t\t\t\t\t\t\t\t# first forward pass\r\n\t\t\t\t\t\t\t\t\t\tsnake_case ,snake_case = model(lowerCAmelCase\t\t\t\t\t\t, attention_mask=lowerCAmelCase ).to_tuple()\r\n\r\n\t\t\t\t\t\t\t\t\t\t# create hypothetical next token and extent to next_input_ids\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor((self.batch_size, 1)\t\t\t\t\t\t, config.vocab_size )\r\n\r\n\t\t\t\t\t\t\t\t\t\t# change a random masked slice from input_ids\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor((1,)\t\t\t\t\t\t, lowerCAmelCase ).item() + 1\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor((self.batch_size, 1)\t\t\t\t\t\t, config.vocab_size ).squeeze(-1 )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = random_other_next_tokens\r\n\r\n\t\t\t\t\t\t\t\t\t\t# append to next input_ids and attn_mask\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = torch.cat([input_ids, next_tokens]\t\t\t\t\t\t, dim=-1 )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = torch.cat(\r\n\t\t\t\t\t\t\t\t\t\t [attn_mask, torch.ones((attn_mask.shape[0], 1)\t\t\t\t\t\t, dtype=torch.long\t\t\t\t\t\t, device=lowerCAmelCase )]\t\t\t\t\t\t, dim=1\t\t\t\t\t\t, )\r\n\r\n\t\t\t\t\t\t\t\t\t\t# get two different outputs\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model(lowerCAmelCase\t\t\t\t\t\t, attention_mask=lowerCAmelCase )['last_hidden_state']\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model(lowerCAmelCase\t\t\t\t\t\t, 
past_key_values=lowerCAmelCase\t\t\t\t\t\t, attention_mask=lowerCAmelCase )['last_hidden_state']\r\n\r\n\t\t\t\t\t\t\t\t\t\t# select random slice\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor((1,)\t\t\t\t\t\t, output_from_past.shape[-1] ).item()\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = output_from_no_past[:, -1, random_slice_idx].detach()\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = output_from_past[:, 0, random_slice_idx].detach()\r\n\r\n\t\t\t\t\t\t\t\t\t\t# test that outputs are equal for slice\r\n\t\t\t\t\t\t\t\t\t\tself.parent.assertTrue(torch.allclose(lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, atol=1E-3 ) )\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, *lowerCAmelCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = torch.ones(input_ids.shape\t\t\t\t\t\t, dtype=torch.long\t\t\t\t\t\t, device=lowerCAmelCase )\r\n\r\n\t\t\t\t\t\t\t\t\t\t# first forward pass\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model(lowerCAmelCase\t\t\t\t\t\t, attention_mask=lowerCAmelCase\t\t\t\t\t\t, use_cache=lowerCAmelCase )\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case ,snake_case = outputs.to_tuple()\r\n\r\n\t\t\t\t\t\t\t\t\t\t# create hypothetical multiple next token and extent to next_input_ids\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor((self.batch_size, 3)\t\t\t\t\t\t, config.vocab_size )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor((self.batch_size, 3)\t\t\t\t\t\t, 2 )\r\n\r\n\t\t\t\t\t\t\t\t\t\t# append to next input_ids and\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = torch.cat([input_ids, next_tokens]\t\t\t\t\t\t, dim=-1 )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = torch.cat([attention_mask, next_attn_mask]\t\t\t\t\t\t, dim=-1 )\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model(lowerCAmelCase\t\t\t\t\t\t, attention_mask=lowerCAmelCase )['last_hidden_state']\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model(lowerCAmelCase\t\t\t\t\t\t, attention_mask=lowerCAmelCase\t\t\t\t\t\t, past_key_values=lowerCAmelCase )[\r\n\t\t\t\t\t\t\t\t\t\t 'last_hidden_state'\r\n\t\t\t\t\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t\t\t\t\t# select random slice\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = ids_tensor((1,)\t\t\t\t\t\t, output_from_past.shape[-1] ).item()\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = output_from_no_past[:, -3:, random_slice_idx].detach()\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = output_from_past[:, :, random_slice_idx].detach()\r\n\r\n\t\t\t\t\t\t\t\t\t\tself.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )\r\n\r\n\t\t\t\t\t\t\t\t\t\t# test that outputs are equal for slice\r\n\t\t\t\t\t\t\t\t\t\tself.parent.assertTrue(torch.allclose(lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, atol=1E-3 ) )\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, *lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase=False ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = BioGptForCausalLM(lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tmodel.to(lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tif gradient_checkpointing:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmodel.gradient_checkpointing_enable()\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case 
= model(lowerCAmelCase\t\t\t\t\t\t, labels=lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tself.parent.assertEqual(result.loss.shape\t\t\t\t\t\t, () )\r\n\t\t\t\t\t\t\t\t\t\tself.parent.assertEqual(result.logits.shape\t\t\t\t\t\t, (self.batch_size, self.seq_length, self.vocab_size) )\r\n\t\t\t\t\t\t\t\t\t\tresult.loss.backward()\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, *lowerCAmelCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = BioGptModel(lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )\r\n\t\t\t\t\t\t\t\t\t\tfor key in model.state_dict().keys():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif \"c_proj\" in key and \"weight\" in key:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std )\t\t\t\t\t\t, 0.0_01 )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 )\t\t\t\t\t\t, 0.01 )\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, lowerCAmelCase\t\t\t\t\t\t, *lowerCAmelCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = self.num_labels\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = BioGptForTokenClassification(lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tmodel.to(lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = model(lowerCAmelCase\t\t\t\t\t\t, attention_mask=lowerCAmelCase\t\t\t\t\t\t, token_type_ids=lowerCAmelCase )\r\n\t\t\t\t\t\t\t\t\t\tself.parent.assertEqual(result.logits.shape\t\t\t\t\t\t, (self.batch_size, self.seq_length, self.num_labels) )\r\n\r\n\t\t\t\tdef \t\t\t\t\t\tsnake_case\t(\t\t\tself ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = self.prepare_config_and_inputs()\r\n\t\t\t\t\t\t\t\t\t\t(\r\n\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t snake_case\r\n\t\t\t\t\t\t\t\t\t\t) ,(\r\n\t\t\t\t\t\t\t\t\t\t snake_case\r\n\t\t\t\t\t\t\t\t\t\t) ,(\r\n\t\t\t\t\t\t\t\t\t\t snake_case\r\n\t\t\t\t\t\t\t\t\t\t) ,(\r\n\t\t\t\t\t\t\t\t\t\t snake_case\r\n\t\t\t\t\t\t\t\t\t\t) ,(\r\n\t\t\t\t\t\t\t\t\t\t snake_case\r\n\t\t\t\t\t\t\t\t\t\t) ,(\r\n\t\t\t\t\t\t\t\t\t\t snake_case\r\n\t\t\t\t\t\t\t\t\t\t) ,(\r\n\t\t\t\t\t\t\t\t\t\t snake_case\r\n\t\t\t\t\t\t\t\t\t\t) ,\r\n\t\t\t\t\t\t\t\t\t\t) = config_and_inputs\r\n\t\t\t\t\t\t\t\t\t\tsnake_case = {'input_ids': input_ids, 'attention_mask': input_mask}\r\n\t\t\t\t\t\t\t\t\t\treturn config, inputs_dict\r\n\r\n\r\n\r\n\r\n\r\n@require_torch\r\nclass \t\t\t\t\tlowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):\r\n\r\n\r\n\r\n\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\t\t\t\t_lowerCAmelCase : List[Any] \t\t\t\t\t= (\r\n\t\t\t\t (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)\r\n\t\t\t\t if is_torch_available()\r\n\t\t\t\t else ()\r\n\t\t\t\t)\r\n\t\t\t\t_lowerCAmelCase : str \t\t\t\t\t= (BioGptForCausalLM,) if is_torch_available() else ()\r\n\t\t\t\t_lowerCAmelCase : str \t\t\t\t\t= (\r\n\t\t\t\t {\r\n\t\t\t\t \"\"\"feature-extraction\"\"\": BioGptModel,\r\n\t\t\t\t \"\"\"text-classification\"\"\": 
[Tail of the preceding row's style_context (style_context_codestyle 149, label 0) - BioGPT test suite (test_modeling_biogpt.py). Recoverable outline:]

- The pipeline mapping routes "text-generation" to BioGptForCausalLM, "token-classification" to BioGptForTokenClassification, and "zero-shot" (plus the preceding classification entry) to BioGptForSequenceClassification, falling back to an empty dict when torch is unavailable; a class-level test flag is set to False.
- setUp builds a BioGptModelTester and a ConfigTester(config_class=..., hidden_size=37); tests cover the common config checks, the base model, the "absolute" / "relative_key" / "relative_key_query" position-embedding variants, attention-mask-past decoding, forward/backward with gradient checkpointing, past large inputs, weight initialization, and the token-classification head.
- A @slow batched-generation test loads BioGptForCausalLM and BioGptTokenizer from "microsoft/biogpt", left-pads with the EOS token, generates for ["Hello, my dog is a little", "Today, I"], and asserts that padded and non-padded decodings agree with the expected continuations ("...a little bit bigger than a little bit." / "...a good idea of how to use the information").
- A @slow smoke test instantiates BioGptModel.from_pretrained over BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1].
- Sequence-classification tests (num_labels=3, single-label and "multi_label_classification") assert logits of shape (batch_size, num_labels).
- Integration tests: causal-LM logits for input ids [[2, 4805, 9, 656, 21]] must have shape (1, 5, 42384) and match a fixed 3x3 slice at atol 1e-4, and beam search (num_beams=5, min_length=100, max_length=1024) from the prompt "COVID-19 is" must reproduce a fixed sentence describing the SARS-CoV-2 pandemic.

[Row 764 · code (code_codestyle 74):]

"""simple docstring"""

from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
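The module above only re-exports the legacy dataset helpers. A minimal usage sketch for the GLUE pair, assuming a transformers version that still ships these classes; the checkpoint name and data path are placeholders, not taken from the dump:

from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(
    task_name="mrpc",
    data_dir="./glue_data/MRPC",  # placeholder path to locally downloaded MRPC files
    max_seq_length=128,
)
# Builds tokenized features for the MRPC training split.
train_dataset = GlueDataset(args, tokenizer=tokenizer)
print(len(train_dataset))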
[Row 764 · style_context (style_context_codestyle 74, label 1) - XLM test suite (test_modeling_xlm.py). Recoverable outline:]

- XLMModelTester stores the usual hyperparameters (batch 13, seq 7, n_langs 2, vocab 99, hidden 32, 5 layers, 4 heads, dropouts 0.1, max_position_embeddings 512, summary_type "last", use_proj, bos_token_id 0, plus attention_probs_dropout_prob, type_sequence_label_size, initializer_range, num_labels, num_choices, scope) and prepares token ids, a random attention mask, optional input lengths (seq_length - 2 plus a small variation), optional language ids over n_langs, and sequence / token / is_impossible / choice labels; get_config wires all of these into an XLMConfig.
- create_and_check_* helpers cover: the base XLMModel with and without lengths/langs (last_hidden_state of shape (batch, seq, hidden)); XLMWithLMHeadModel (scalar loss, logits (batch, seq, vocab)); XLMForQuestionAnsweringSimple (start/end logits (batch, seq)); XLMForQuestionAnswering with cls_index / is_impossible / p_mask inputs and beam-search QA outputs shaped by start_n_top and end_n_top; XLMForSequenceClassification (logits (batch, type_sequence_label_size)); XLMForTokenClassification (logits (batch, seq, num_labels)); and XLMForMultipleChoice over inputs expanded to num_choices (logits (batch, num_choices)).
- XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes lists the seven XLM heads, all_generative_model_classes is (XLMWithLMHeadModel,) (TODO (PVP): check whether other models also support language generation), and the pipeline mapping routes feature-extraction / fill-mask / question-answering / text-classification / text-generation / token-classification / zero-shot to the matching classes.
- QAPipelineTests are skipped for slow (non-"Fast") tokenizers pending a fix; _prepare_for_class adds zero-valued extra label tensors for XLMForQuestionAnswering; setUp uses XLMModelTester plus ConfigTester(emb_dim=37); one test method per create_and_check helper; _check_attentions_for_generate and _check_hidden_states_for_generate assert per-step shapes (batch_size * num_beam_groups, num_attention_heads, tgt_len, src_len) and (batch_size * num_beam_groups, seq_len, hidden_size) with lengths of min_length + idx + 1; a @slow test loads XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1].
- XLMModelLanguageGenerationTest: @slow, loads XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048"), feeds ids [[14, 447]] ("the president"), and expects generate() to repeat those two ids ten times (TODO(PVP): the inputs tried so far generate poorly; the model may not suit auto-regressive inference).

[Row 765 · code (code_codestyle 24) - recursive bubble sort; the dump reuses one obfuscated name for every assignment target, so identifiers are restored from the expressions that read them:]

"""simple docstring"""

def bubble_sort(list_data: list, length: int = 0) -> list:
    """One bubble pass over the first `length` items, then recurse on the shrunken prefix until no swap happens."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

[Row 765 · style_context - fairseq wav2vec2 to Transformers checkpoint-conversion script. Recoverable outline:]

- MAPPING renames fairseq keys to HF ones (post_extract_proj -> feature_projection.projection, encoder.pos_conv.0 -> encoder.pos_conv_embed.conv, self_attn.{k,v,q,out}_proj and the layer norms into encoder.layers.*, fc1/fc2 -> the feed-forward dense layers, quantizer.vars -> quantizer.codevectors, final_proj -> project_hid, w2v_encoder.proj -> lm_head, mask_emb -> masked_spec_embed, pooling_layer.linear/projection -> projector/classifier, w2v_model.layer_norm -> feature_projection.layer_norm); TOP_LEVEL_KEYS lists the heads kept un-prefixed.
- read_txt_into_dict parses a label file into an id-to-label dict; set_recursively walks an attribute path on the HF model, validates the target shape, and writes weight / weight_g / weight_v / bias / param tensors, raising ValueError on shape mismatch and logging each initialization.
- rename_dict builds the flat HF key (key plus weight_type or hf_param_name) and stores the value (the first element except for lm_head); PARAM_MAPPING translates adapter parameters (W_a/W_b/b_a/b_b/ln_W/ln_b) to linear_1/linear_2/norm weights and biases.
- load_wavaveca_layer matches each fairseq tensor against MAPPING (handling the w2v_model. prefix and a "*" layer-index wildcard), classifies it as weight_g / weight_v / bias / weight (TODO: don't match quantizer.weight_proj), and dispatches to rename_dict or set_recursively; recursively_load_weights iterates the fairseq state dict, routes conv_layers.* tensors to load_conv_layer, and warns about any unused weights.
- load_conv_layer checks conv (type_id 0) and layer-norm (type_id 2, honoring use_group_norm only for layer 0) weight and bias shapes before copying, raising ValueError on mismatch.
- convert_wavaveca_checkpoint (@torch.no_grad) loads or builds a WavaVecaConfig, then branches: sequence classification (label file read into id2label, WavaVecaForSequenceClassification plus a feature extractor saved to the dump folder); fine-tuned CTC (fairseq Dictionary loaded, pad/bos/eos indices remapped since fairseq swaps the first two special tokens, vocab.json written, a WavaVecaCTCTokenizer and feature extractor wrapped in a WavaVecaProcessor, WavaVecaForCTC); or pre-training (WavaVecaForPreTraining via an audio_pretraining fairseq task). It then loads the fairseq ensemble, copies the weights, and saves the HF model.
- The argparse entry point exposes --pytorch_dump_folder_path, --checkpoint_path, --dict_path, --config_path, --not_finetuned, and --is_seq_class.

[Row 766 · code (code_codestyle 58):]

'''simple docstring'''

import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment

[notebook_launcher / debug_launcher (accelerate launchers). Recoverable outline:]

- notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500") detects Kaggle (KAGGLE* env vars) and Colab (IPython module), validates mixed_precision against PrecisionType, and dispatches. On TPU it requires a fresh AcceleratorState, defaults to 8 processes, and spawns via torch_xla's xmp.spawn with a PrepareForLaunch(distributed_type="TPU") wrapper and start_method="fork"; in Colab without a TPU it simply runs function(*args) on one GPU or CPU.
- For num_processes > 1 it refuses to launch if an Accelerator was already initialized or CUDA was already touched, patches the distributed environment (world_size, master_addr "127.0.01", master_port, mixed_precision), and calls torch.multiprocessing.start_processes with start_method="fork", re-raising a CUDA re-initialization ProcessRaisedException with a hint about problematic imports; otherwise it runs locally on MPS (enabling the MPS path via an env flag), GPU, or CPU.
- debug_launcher(function, args=(), num_processes=2) performs a CPU-only forked launch, pointing accelerate_debug_rdv_file at a tempfile.NamedTemporaryFile and setting accelerate_use_cpu="yes".

[Row 766 · style_context (style_context_codestyle 18, label 0) - binary insertion sort; identifiers restored from the expressions that read them:]

def binary_insertion_sort(collection: list) -> list:
    """Insertion sort that locates each insertion point with binary search."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the sorted prefix collection[:i] for val's slot.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the tail of the prefix right and drop val into place.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
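The binary search only cuts the comparison count to O(n log n); the shifting loop still makes this O(n^2) writes in the worst case. A quick sanity check, runnable alongside the definition above (inputs are hypothetical):

assert binary_insertion_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([1]) == [1]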
# ===== sample: SAM checkpoint conversion script =====
import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)


KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # The reference scores below were recorded for the ViT-H checkpoint only.
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
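# Illustrative usage sketch (added; paths are hypothetical): converting the ViT-B variant
# and writing the HF-format model locally. A CUDA device is required, since the script
# moves the model and inputs to "cuda" before the logit checks.
#
#   python convert_sam_checkpoint.py \
#       --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-b \
#       --model_hub_id ybelkada/segment-anything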
# ===== sample: DeepSpeed + Wav2Vec2 integration tests =====
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function so both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, quality_checks=True, fp16=True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        #    - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        #    results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
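# Illustrative sketch (added): the command line that run_trainer() assembles for a
# distributed zero2 fp16 run. The bracketed values are hypothetical stand-ins for the
# fixture-derived paths used by the real test.
#
#   deepspeed --num_nodes 1 --num_gpus 2 \
#       <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random \
#       ... \
#       --fp16 \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json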
# ===== sample: Banker's algorithm (deadlock avoidance) =====
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, column by column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> np.ndarray:
        """Resources still free: total claim minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Remember each need vector's original process index."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print("Current Usage by Active Processes: " + " ".join(str(x) for x in self.__claim_vector))
        print("Initial Available Resources: " + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
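# Illustrative sketch (added): running the safety check on the module's own test tables.
# Any truthy keyword (here ``describe=True``) makes main() print the resource tables
# before the simulation starts.
def _demo_bankers_algorithm() -> None:
    BankersAlgorithm(
        claim_vector=test_claim_vector,
        allocated_resources_table=test_allocated_res_table,
        maximum_claim_table=test_maximum_claim_table,
    ).main(describe=True)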
# ===== sample: TF LayoutLMv3 model tests =====
from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: corners must satisfy x0 <= x1 and y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _ = (
            self.model_tester.prepare_config_and_inputs()
        )
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
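# Illustrative sketch (added): the bbox fix-up loop in prepare_config_and_inputs() above,
# restated as a vectorized helper for clarity. LayoutLMv3 expects boxes as
# (x0, y0, x1, y1) with x0 <= x1 and y0 <= y1, so randomly drawn corners must be
# swapped into canonical order before they are fed to the model.
def _canonicalize_boxes(bbox: np.ndarray) -> np.ndarray:
    out = bbox.copy()
    out[..., 0] = np.minimum(bbox[..., 0], bbox[..., 2])
    out[..., 2] = np.maximum(bbox[..., 0], bbox[..., 2])
    out[..., 1] = np.minimum(bbox[..., 1], bbox[..., 3])
    out[..., 3] = np.maximum(bbox[..., 1], bbox[..., 3])
    return out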
# ===== sample: XLM-RoBERTa tokenizer tests =====
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
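    # Illustrative sketch (added, not part of the original suite): the vocab checks above
    # rest on the fairseq special-token layout that XLMRobertaTokenizer reproduces, with
    # "<s>"=0, "<pad>"=1, "</s>"=2, "<unk>"=3 reserved ahead of the sentencepiece pieces.
    def test_demo_special_token_ids(self):
        tokenizer = self.get_tokenizer()
        self.assertEqual(tokenizer.convert_tokens_to_ids("<s>"), 0)
        self.assertEqual(tokenizer.convert_tokens_to_ids("<pad>"), 1)
        self.assertEqual(tokenizer.convert_tokens_to_ids("</s>"), 2)
        self.assertEqual(tokenizer.convert_tokens_to_ids("<unk>"), 3)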
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
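    # Illustrative sketch (added): a minimal encode/decode round trip on the fixture model.
    # Restricted to ASCII the 1002-piece test vocabulary can represent, since pieces
    # outside it collapse to "<unk>" and would not survive the round trip.
    def test_demo_encode_decode_roundtrip(self):
        tokenizer = self.get_tokenizer()
        text = "This is a test"
        ids = tokenizer.encode(text, add_special_tokens=False)
        self.assertEqual(tokenizer.decode(ids), text)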
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
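    # Illustrative sketch (added): `save_pretrained`/`from_pretrained` should round-trip the
    # slow tokenizer on its own, independent of the slow/fast comparisons above. Only the
    # local SentencePiece fixture is used, so no network access is required.
    def test_demo_save_and_reload_slow_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tmpdirname = tempfile.mkdtemp()
        try:
            tokenizer.save_pretrained(tmpdirname)
            reloaded = XLMRobertaTokenizer.from_pretrained(tmpdirname)
            sample = "I was born in 92000, and this is falsé."
            self.assertEqual(tokenizer.tokenize(sample), reloaded.tokenize(sample))
        finally:
            shutil.rmtree(tmpdirname)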
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
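    # Illustrative sketch (added): extends the pickle test above by checking that the
    # deserialized tokenizer behaves identically, not merely that it deserializes.
    def test_demo_pickle_roundtrip_behaviour(self):
        tokenizer = self.get_tokenizer()
        restored = pickle.loads(pickle.dumps(tokenizer))
        sample = "I was born in 92000, and this is falsé."
        self.assertEqual(tokenizer.tokenize(sample), restored.tokenize(sample))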
Any\t=\t\t\t\t\t\t\ttokenizer.encode(_lowerCamelCase\t\t\t\t\t\t\t,\tadd_special_tokens=_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Tuple\t=\t\t\t\t\t\t\trust_tokenizer.encode(_lowerCamelCase\t\t\t\t\t\t\t,\tadd_special_tokens=_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\tself.assertListEqual(_lowerCamelCase\t\t\t\t\t\t\t,\t_lowerCamelCase )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: List[Any]\t=\t\t\t\t\t\t\tself.get_rust_tokenizer()\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Any\t=\t\t\t\t\t\t\ttokenizer.encode(_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Tuple\t=\t\t\t\t\t\t\trust_tokenizer.encode(_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\tself.assertListEqual(_lowerCamelCase\t\t\t\t\t\t\t,\t_lowerCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t@slow\r\n\t\t\t\t\t\tdef _a ( self : List[Any] ):\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Tuple\t=\t\t\t\t\t\t\t'''Hello World!'''\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Optional[Any]\t=\t\t\t\t\t\t\t[0, 35378, 6661, 38, 2]\r\n\t\t\t\t\t\t\t\t\t\t\t# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer\r\n\t\t\t\t\t\t\t\t\t\t\t# xlmr.eval()\r\n\t\t\t\t\t\t\t\t\t\t\t# xlmr.encode(symbols)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tself.assertListEqual(_lowerCamelCase\t\t\t\t\t\t\t,\tself.big_tokenizer.encode(_lowerCamelCase ) )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t@slow\r\n\t\t\t\t\t\tdef _a ( self : Tuple ):\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Dict\t=\t\t\t\t\t\t\t(\r\n\t\t\t\t\t\t\t\t\t\t\t '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will'''\r\n\t\t\t\t\t\t\t\t\t\t\t ''' add words that should not exsist and be tokenized to , such as saoneuhaoesuth'''\r\n\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Dict\t=\t\t\t\t\t\t\t[\r\n\t\t\t\t\t\t\t\t\t\t\t 0,\r\n\t\t\t\t\t\t\t\t\t\t\t 3293,\r\n\t\t\t\t\t\t\t\t\t\t\t 83,\r\n\t\t\t\t\t\t\t\t\t\t\t 10,\r\n\t\t\t\t\t\t\t\t\t\t\t 4552,\r\n\t\t\t\t\t\t\t\t\t\t\t 4989,\r\n\t\t\t\t\t\t\t\t\t\t\t 7986,\r\n\t\t\t\t\t\t\t\t\t\t\t 678,\r\n\t\t\t\t\t\t\t\t\t\t\t 10,\r\n\t\t\t\t\t\t\t\t\t\t\t 5915,\r\n\t\t\t\t\t\t\t\t\t\t\t 111,\r\n\t\t\t\t\t\t\t\t\t\t\t 179459,\r\n\t\t\t\t\t\t\t\t\t\t\t 124850,\r\n\t\t\t\t\t\t\t\t\t\t\t 4,\r\n\t\t\t\t\t\t\t\t\t\t\t 6044,\r\n\t\t\t\t\t\t\t\t\t\t\t 237,\r\n\t\t\t\t\t\t\t\t\t\t\t 12,\r\n\t\t\t\t\t\t\t\t\t\t\t 6,\r\n\t\t\t\t\t\t\t\t\t\t\t 5,\r\n\t\t\t\t\t\t\t\t\t\t\t 6,\r\n\t\t\t\t\t\t\t\t\t\t\t 4,\r\n\t\t\t\t\t\t\t\t\t\t\t 6780,\r\n\t\t\t\t\t\t\t\t\t\t\t 705,\r\n\t\t\t\t\t\t\t\t\t\t\t 15,\r\n\t\t\t\t\t\t\t\t\t\t\t 1388,\r\n\t\t\t\t\t\t\t\t\t\t\t 44,\r\n\t\t\t\t\t\t\t\t\t\t\t 378,\r\n\t\t\t\t\t\t\t\t\t\t\t 10114,\r\n\t\t\t\t\t\t\t\t\t\t\t 711,\r\n\t\t\t\t\t\t\t\t\t\t\t 152,\r\n\t\t\t\t\t\t\t\t\t\t\t 20,\r\n\t\t\t\t\t\t\t\t\t\t\t 6,\r\n\t\t\t\t\t\t\t\t\t\t\t 5,\r\n\t\t\t\t\t\t\t\t\t\t\t 22376,\r\n\t\t\t\t\t\t\t\t\t\t\t 642,\r\n\t\t\t\t\t\t\t\t\t\t\t 1221,\r\n\t\t\t\t\t\t\t\t\t\t\t 15190,\r\n\t\t\t\t\t\t\t\t\t\t\t 34153,\r\n\t\t\t\t\t\t\t\t\t\t\t 450,\r\n\t\t\t\t\t\t\t\t\t\t\t 5608,\r\n\t\t\t\t\t\t\t\t\t\t\t 959,\r\n\t\t\t\t\t\t\t\t\t\t\t 1119,\r\n\t\t\t\t\t\t\t\t\t\t\t 57702,\r\n\t\t\t\t\t\t\t\t\t\t\t 136,\r\n\t\t\t\t\t\t\t\t\t\t\t 186,\r\n\t\t\t\t\t\t\t\t\t\t\t 47,\r\n\t\t\t\t\t\t\t\t\t\t\t 1098,\r\n\t\t\t\t\t\t\t\t\t\t\t 29367,\r\n\t\t\t\t\t\t\t\t\t\t\t 47,\r\n\t\t\t\t\t\t\t\t\t\t\t # 4426, # What fairseq tokenizes from 
\"\": \"_<\"\r\n\t\t\t\t\t\t\t\t\t\t\t # 3678, # What fairseq tokenizes from \"\": \"unk\"\r\n\t\t\t\t\t\t\t\t\t\t\t # 2740, # What fairseq tokenizes from \"\": \">\"\r\n\t\t\t\t\t\t\t\t\t\t\t 3, # What we tokenize from \"\": \"\"\r\n\t\t\t\t\t\t\t\t\t\t\t 6, # Residue from the tokenization: an extra sentencepiece underline\r\n\t\t\t\t\t\t\t\t\t\t\t 4,\r\n\t\t\t\t\t\t\t\t\t\t\t 6044,\r\n\t\t\t\t\t\t\t\t\t\t\t 237,\r\n\t\t\t\t\t\t\t\t\t\t\t 6284,\r\n\t\t\t\t\t\t\t\t\t\t\t 50901,\r\n\t\t\t\t\t\t\t\t\t\t\t 528,\r\n\t\t\t\t\t\t\t\t\t\t\t 31,\r\n\t\t\t\t\t\t\t\t\t\t\t 90,\r\n\t\t\t\t\t\t\t\t\t\t\t 34,\r\n\t\t\t\t\t\t\t\t\t\t\t 927,\r\n\t\t\t\t\t\t\t\t\t\t\t 2,\r\n\t\t\t\t\t\t\t\t\t\t\t]\r\n\t\t\t\t\t\t\t\t\t\t\t# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer\r\n\t\t\t\t\t\t\t\t\t\t\t# xlmr.eval()\r\n\t\t\t\t\t\t\t\t\t\t\t# xlmr.encode(symbols)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tself.assertListEqual(_lowerCamelCase\t\t\t\t\t\t\t,\tself.big_tokenizer.encode(_lowerCamelCase ) )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t@slow\r\n\t\t\t\t\t\tdef _a ( self : Optional[int] ):\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Optional[Any]\t=\t\t\t\t\t\t\t{'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501\r\n\t\t\t\t\t\t\t\t\t\t\t# fmt: on\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tself.tokenizer_integration_test_util(\r\n\t\t\t\t\t\t\t\t\t\t\t expected_encoding=_lowerCamelCase\t\t\t\t\t\t\t,\tmodel_name='''xlm-roberta-base'''\t\t\t\t\t\t\t,\trevision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3'''\t\t\t\t\t\t\t,\t)\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":4,"string":"4"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\n\r\n\r\nimport unittest\r\nfrom queue import Empty\r\nfrom threading import Thread\r\n\r\nfrom transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available\r\nfrom transformers.testing_utils import CaptureStdout, require_torch, torch_device\r\n\r\nfrom ..test_modeling_common import ids_tensor\r\n\r\n\r\nif is_torch_available():\r\n\t\t\t\t\t\t\timport torch\r\n\r\n\t\t\t\t\t\t\tfrom transformers import AutoModelForCausalLM\r\n\r\n\r\n\r\n\r\n\r\n\r\n@require_torch\r\nclass UpperCamelCase_ (unittest.TestCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\t\t\t\t\t\tdef _a ( self : Dict ):\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Optional[int]\t=\t\t\t\t\t\t\tAutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Tuple\t=\t\t\t\t\t\t\tAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Dict\t=\t\t\t\t\t\t\t-1\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: List[str]\t=\t\t\t\t\t\t\tids_tensor((1, 5)\t\t\t\t\t\t\t,\tvocab_size=model.config.vocab_size ).to(_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Any\t=\t\t\t\t\t\t\tmodel.generate(_lowerCamelCase\t\t\t\t\t\t\t,\tmax_new_tokens=10\t\t\t\t\t\t\t,\tdo_sample=_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: List[str]\t=\t\t\t\t\t\t\ttokenizer.decode(greedy_ids[0] )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\twith CaptureStdout() as cs:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA_\t: List[str]\t=\t\t\t\t\t\t\tTextStreamer(_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmodel.generate(_lowerCamelCase\t\t\t\t\t\t\t,\tmax_new_tokens=10\t\t\t\t\t\t\t,\tdo_sample=_lowerCamelCase\t\t\t\t\t\t\t,\tstreamer=_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\t# The greedy text should be printed to stdout, except for the final \"\\n\" in the streamer\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Dict\t=\t\t\t\t\t\t\tcs.out[:-1]\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(_lowerCamelCase\t\t\t\t\t\t\t,\t_lowerCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tdef _a ( self : Tuple ):\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Optional[int]\t=\t\t\t\t\t\t\tAutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: List[str]\t=\t\t\t\t\t\t\tAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Dict\t=\t\t\t\t\t\t\t-1\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: List[str]\t=\t\t\t\t\t\t\tids_tensor((1, 5)\t\t\t\t\t\t\t,\tvocab_size=model.config.vocab_size ).to(_lowerCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\tA_\t: Optional[int]\t=\t\t\t\t\t\t\tmodel.generate(_lowerCamelCase\t\t\t\t\t\t\t,\tmax_new_tokens=10\t\t\t\t\t\t\t,\tdo_sample=_lowerCamelCase 
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
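# A minimal usage sketch (editor-added, not part of the test file above) of how
# TextIteratorStreamer is typically consumed outside a test. The "gpt2" checkpoint
# and the prompt text are placeholder assumptions; any causal LM works the same way.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer(["A short story:"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

# generate() blocks, so it runs on a worker thread while the main thread prints chunks
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()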
# ---- style_context_codestyle: 4 | label: 1 ----

# ---- rowIdx: 770 | code ----

def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))

# ---- code_codestyle: 319 ----
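# Editor's note: the float comparison in perfect_cube() can misfire once the cube
# root lands just below an integer (e.g. 343 ** (1 / 3) == 6.999...), so a sketch
# of an integer-exact variant follows. perfect_cube_exact is a hypothetical helper,
# not part of the row above.

def perfect_cube_exact(n: int) -> bool:
    # Round the float estimate, then confirm with exact integer arithmetic.
    if n < 0:
        return perfect_cube_exact(-n)
    root = round(n ** (1 / 3))
    return root**3 == n or (root - 1) ** 3 == n or (root + 1) ** 3 == n


assert perfect_cube_exact(343)      # 7**3; the pure-float check can miss this one
assert not perfect_cube_exact(344)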
# ---- style_context ----

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
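# Editor's sketch of how the two classes above compose; assumes a transformers
# release that ships ConvBERT with ONNX support (the import path below is the one
# used in that case and may differ between versions).
from transformers import ConvBertConfig
from transformers.models.convbert.configuration_convbert import ConvBertOnnxConfig

config = ConvBertConfig()            # all defaults, e.g. hidden_size=768, conv_kernel_size=9
onnx_config = ConvBertOnnxConfig(config)
print(onnx_config.inputs)            # OrderedDict mapping each input name to its dynamic axes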
# ---- style_context_codestyle: 319 | label: 1 ----

# ---- rowIdx: 771 | code ----

import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int32) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length

# ---- code_codestyle: 346 ----
# ---- style_context ----

def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")

# ---- style_context_codestyle: 346 | label: 1 ----

# ---- rowIdx: 772 | code ----

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

# ---- code_codestyle: 251 ----
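# Editor's sketch: the deferral trick behind the lazy module above, shown with plain
# PEP 562 module-level __getattr__ rather than transformers' internal _LazyModule
# class. The mapping below is a hypothetical illustration, not the library's API.
import importlib

_LAZY_ATTRS = {"PegasusXConfig": ".configuration_pegasus_x"}


def __getattr__(name):
    # Only runs on first attribute access, so importing the package stays cheap.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")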
# ---- style_context ----

# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)

# ---- style_context_codestyle: 170 | label: 0 ----

# ---- rowIdx: 773 | code ----

from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")

# ---- code_codestyle: 328 ----
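# Editor's sanity-check sketch (assumes it runs in the same module as gamma above):
# the recursion should agree with math.gamma for small integers and half-integers.
import math

for x in (0.5, 1, 1.5, 2, 3.5, 10):
    assert math.isclose(gamma(x), math.gamma(x), rel_tol=1e-12), x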
# ---- style_context ----

import math


def res(x, y):
    if 0 not in (x, y):
        # Compare via logarithms: log10(x**y) = y * log10(x), so comparing these
        # values compares the powers without computing them.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")

# ---- style_context_codestyle: 328 | label: 1 ----
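# Editor's note on why the log trick above works: log10 is strictly increasing, so
# for positive bases x1**y1 > x2**y2 exactly when y1*log10(x1) > y2*log10(x2), and
# the huge powers never have to be materialised. For example:
import math

assert 1_000_000 * math.log10(2) < 999_999 * math.log10(3)  # 3**999999 > 2**1000000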
# ---- rowIdx: 774 | code ----

from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )

# ---- code_codestyle: 100 ----
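# Editor's note: the stack version is O(n), since each element is pushed and popped
# at most once, while the two loop-based variants are O(n**2) in the worst case.
# A small worked example (assumes the functions above are in scope):
sample = [2, 7, 3, 5, 4, 6, 8]
assert next_greatest_element(sample) == [7, 8, 5, 6, 6, 8, -1]
assert next_greatest_element_fast(sample) == next_greatest_element(sample)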
# ---- style_context ----

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

# ---- style_context_codestyle: 100 | label: 1 ----
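# Editor's sketch of composing a Blip2Config from sub-configs, mirroring the
# classmethod above; assumes a transformers release that ships BLIP-2.
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
)
print(config.num_query_tokens)  # 32 by default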
# ---- rowIdx: 775 | code ----

import unittest

from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
        )

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and
pytorch-pretrained-bert) provides \"\n \"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural \"\n \"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained \"\n \"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.\",\n \"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly \"\n \"conditioning on both left and right context in all layers.\",\n \"The quick brown fox jumps over the lazy dog.\",\n ]\n\n # fmt: off\n A__\t= {\n \"input_ids\": [\n [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],\n [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n ],\n \"attention_mask\": [\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n }\n # fmt: on\n\n self.tokenizer_integration_test_util(\n expected_encoding=snake_case_ ,\t\t\tmodel_name=\"microsoft/speecht5_asr\" ,\t\t\trevision=\"c5ef64c71905caeccde0e4462ef3f9077224c524\" ,\t\t\tsequences=snake_case_ ,\t\t\t)\n\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":230,"string":"230"},"style_context":{"kind":"string","value":"\n\n\n\n\n\"\"\"simple docstring\"\"\"\n\n\n\nfrom ...processing_utils import ProcessorMixin\nfrom ...tokenization_utils_base import BatchEncoding\n\n\n\n\nclass UpperCAmelCase_ (\t\t\t\t\t\tA_\t\t\t\t\t\t\t):\n lowercase__\t\t\t\t\t\t =\t\t\t\t['''image_processor''', '''tokenizer''']\n lowercase__\t\t\t\t\t\t =\t\t\t\t'''AutoImageProcessor'''\n lowercase__\t\t\t\t\t\t =\t\t\t\t'''AutoTokenizer'''\n\n\n\n\n def __init__( self\t\t\t\t\t\t:\tstr ,\t\t\tsnake_case_\t\t\t\t\t\t:\tDict ,\t\t\tsnake_case_\t\t\t\t\t\t:\tList[str]\t\t\t\t)\t\t\t\t->\t\t\t\t\t\t\tstr:\n\n\n\n\n '''simple docstring'''\n\n super().__init__(snake_case_ ,\t\t\tsnake_case_\t\t\t\t)\n A__\t= self.image_processor\n\n\n\n\n def __call__( self\t\t\t\t\t\t:\tint ,\t\t\tsnake_case_\t\t\t\t\t\t:\tAny=None ,\t\t\tsnake_case_\t\t\t\t\t\t:\tAny=None ,\t\t\tsnake_case_\t\t\t\t\t\t:\tUnion[str, Any]=None ,\t\t\t**snake_case_\t\t\t\t\t\t:\tOptional[int]\t\t\t\t)\t\t\t\t->\t\t\t\t\t\t\tOptional[int]:\n\n\n\n\n '''simple docstring'''\n\n if text is None and images is None:\n raise ValueError(\"You have to specify either text or images. 
Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Attach the pixel values to the text encoding so a single
            # BatchEncoding carries both modalities.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the underlying tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the underlying tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]


def solution(limit: int = 1_000_000) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, computed with a sieve."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime; update its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())


from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCommand(ABC):
    """Minimal abstract base class for CLI subcommands."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}

try:
    if not 
is_sentencepiece_available():\r\n\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\tpass\r\nelse:\r\n\t\t\t\t\t__A\t\t\t\t:\t\t\t\t\t\t\tAny\t\t\t =\t\t\t\t[\"AlbertTokenizer\"]\r\n\r\ntry:\r\n\t\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\tpass\r\nelse:\r\n\t\t\t\t\t__A\t\t\t\t:\t\t\t\t\t\t\tUnion[str, Any]\t\t\t =\t\t\t\t[\"AlbertTokenizerFast\"]\r\n\r\ntry:\r\n\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\tpass\r\nelse:\r\n\t\t\t\t\t__A\t\t\t\t:\t\t\t\t\t\t\tDict\t\t\t =\t\t\t\t[\r\n\t\t\t\t\t \"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST\",\r\n\t\t\t\t\t \"AlbertForMaskedLM\",\r\n\t\t\t\t\t \"AlbertForMultipleChoice\",\r\n\t\t\t\t\t \"AlbertForPreTraining\",\r\n\t\t\t\t\t \"AlbertForQuestionAnswering\",\r\n\t\t\t\t\t \"AlbertForSequenceClassification\",\r\n\t\t\t\t\t \"AlbertForTokenClassification\",\r\n\t\t\t\t\t \"AlbertModel\",\r\n\t\t\t\t\t \"AlbertPreTrainedModel\",\r\n\t\t\t\t\t \"load_tf_weights_in_albert\",\r\n\t\t\t\t\t]\r\n\r\ntry:\r\n\t\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\tpass\r\nelse:\r\n\t\t\t\t\t__A\t\t\t\t:\t\t\t\t\t\t\tint\t\t\t =\t\t\t\t[\r\n\t\t\t\t\t \"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST\",\r\n\t\t\t\t\t \"TFAlbertForMaskedLM\",\r\n\t\t\t\t\t \"TFAlbertForMultipleChoice\",\r\n\t\t\t\t\t \"TFAlbertForPreTraining\",\r\n\t\t\t\t\t \"TFAlbertForQuestionAnswering\",\r\n\t\t\t\t\t \"TFAlbertForSequenceClassification\",\r\n\t\t\t\t\t \"TFAlbertForTokenClassification\",\r\n\t\t\t\t\t \"TFAlbertMainLayer\",\r\n\t\t\t\t\t \"TFAlbertModel\",\r\n\t\t\t\t\t \"TFAlbertPreTrainedModel\",\r\n\t\t\t\t\t]\r\n\r\ntry:\r\n\t\t\t\t\tif not is_flax_available():\r\n\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\tpass\r\nelse:\r\n\t\t\t\t\t__A\t\t\t\t:\t\t\t\t\t\t\tTuple\t\t\t =\t\t\t\t[\r\n\t\t\t\t\t \"FlaxAlbertForMaskedLM\",\r\n\t\t\t\t\t \"FlaxAlbertForMultipleChoice\",\r\n\t\t\t\t\t \"FlaxAlbertForPreTraining\",\r\n\t\t\t\t\t \"FlaxAlbertForQuestionAnswering\",\r\n\t\t\t\t\t \"FlaxAlbertForSequenceClassification\",\r\n\t\t\t\t\t \"FlaxAlbertForTokenClassification\",\r\n\t\t\t\t\t \"FlaxAlbertModel\",\r\n\t\t\t\t\t \"FlaxAlbertPreTrainedModel\",\r\n\t\t\t\t\t]\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\t\tfrom .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig\r\n\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\tif not is_sentencepiece_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tfrom .tokenization_albert import AlbertTokenizer\r\n\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tfrom .tokenization_albert_fast import AlbertTokenizerFast\r\n\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\t\texcept 
OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tfrom .modeling_albert import (\r\n\t\t\t\t\t\t\t\t\t\t ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t\t\t AlbertForMaskedLM,\r\n\t\t\t\t\t\t\t\t\t\t AlbertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t\t\t AlbertForPreTraining,\r\n\t\t\t\t\t\t\t\t\t\t AlbertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t\t\t AlbertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t\t\t AlbertForTokenClassification,\r\n\t\t\t\t\t\t\t\t\t\t AlbertModel,\r\n\t\t\t\t\t\t\t\t\t\t AlbertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t\t\t load_tf_weights_in_albert,\r\n\t\t\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tfrom .modeling_tf_albert import (\r\n\t\t\t\t\t\t\t\t\t\t TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t\t\t TFAlbertForMaskedLM,\r\n\t\t\t\t\t\t\t\t\t\t TFAlbertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t\t\t TFAlbertForPreTraining,\r\n\t\t\t\t\t\t\t\t\t\t TFAlbertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t\t\t TFAlbertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t\t\t TFAlbertForTokenClassification,\r\n\t\t\t\t\t\t\t\t\t\t TFAlbertMainLayer,\r\n\t\t\t\t\t\t\t\t\t\t TFAlbertModel,\r\n\t\t\t\t\t\t\t\t\t\t TFAlbertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\tif not is_flax_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tfrom .modeling_flax_albert import (\r\n\t\t\t\t\t\t\t\t\t\t FlaxAlbertForMaskedLM,\r\n\t\t\t\t\t\t\t\t\t\t FlaxAlbertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t\t\t FlaxAlbertForPreTraining,\r\n\t\t\t\t\t\t\t\t\t\t FlaxAlbertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t\t\t FlaxAlbertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t\t\t FlaxAlbertForTokenClassification,\r\n\t\t\t\t\t\t\t\t\t\t FlaxAlbertModel,\r\n\t\t\t\t\t\t\t\t\t\t FlaxAlbertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t\t\t)\r\nelse:\r\n\t\t\t\t\timport sys\r\n\r\n\t\t\t\t\t__A\t\t\t\t:\t\t\t\t\t\t\tstr\t\t\t =\t\t\t\t_LazyModule(__name__, globals()[\"__file__\"], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":371,"string":"371"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available\r\n\r\n\r\n__A\t\t\t\t:\t\t\t\t\t\t\tList[Any]\t\t\t =\t\t\t\t{\r\n \"configuration_gpt_neo\": [\"GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP\", \"GPTNeoConfig\", \"GPTNeoOnnxConfig\"],\r\n}\r\n\r\ntry:\r\n\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\tpass\r\nelse:\r\n\t\t\t\t\t__A\t\t\t\t:\t\t\t\t\t\t\tstr\t\t\t =\t\t\t\t[\r\n\t\t\t\t\t \"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST\",\r\n\t\t\t\t\t \"GPTNeoForCausalLM\",\r\n\t\t\t\t\t \"GPTNeoForQuestionAnswering\",\r\n\t\t\t\t\t \"GPTNeoForSequenceClassification\",\r\n\t\t\t\t\t \"GPTNeoForTokenClassification\",\r\n\t\t\t\t\t \"GPTNeoModel\",\r\n\t\t\t\t\t \"GPTNeoPreTrainedModel\",\r\n\t\t\t\t\t 
\"load_tf_weights_in_gpt_neo\",\r\n\t\t\t\t\t]\r\n\r\ntry:\r\n\t\t\t\t\tif not is_flax_available():\r\n\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\tpass\r\nelse:\r\n\t\t\t\t\t__A\t\t\t\t:\t\t\t\t\t\t\tList[Any]\t\t\t =\t\t\t\t[\r\n\t\t\t\t\t \"FlaxGPTNeoForCausalLM\",\r\n\t\t\t\t\t \"FlaxGPTNeoModel\",\r\n\t\t\t\t\t \"FlaxGPTNeoPreTrainedModel\",\r\n\t\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\t\tfrom .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig\r\n\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tfrom .modeling_gpt_neo import (\r\n\t\t\t\t\t\t\t\t\t\t GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t\t\t GPTNeoForCausalLM,\r\n\t\t\t\t\t\t\t\t\t\t GPTNeoForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t\t\t GPTNeoForSequenceClassification,\r\n\t\t\t\t\t\t\t\t\t\t GPTNeoForTokenClassification,\r\n\t\t\t\t\t\t\t\t\t\t GPTNeoModel,\r\n\t\t\t\t\t\t\t\t\t\t GPTNeoPreTrainedModel,\r\n\t\t\t\t\t\t\t\t\t\t load_tf_weights_in_gpt_neo,\r\n\t\t\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\tif not is_flax_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tfrom .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel\r\n\r\n\r\nelse:\r\n\t\t\t\t\timport sys\r\n\r\n\t\t\t\t\t__A\t\t\t\t:\t\t\t\t\t\t\tDict\t\t\t =\t\t\t\t_LazyModule(__name__, globals()[\"__file__\"], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":326,"string":"326"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":778,"cells":{"code":{"kind":"string","value":"\r\nimport unicodedata\r\nfrom dataclasses import dataclass\r\nfrom typing import Optional, Union\r\n\r\nimport numpy as np\r\n\r\nfrom transformers.data.data_collator import DataCollatorMixin\r\nfrom transformers.file_utils import PaddingStrategy\r\nfrom transformers.tokenization_utils_base import PreTrainedTokenizerBase\r\n\r\n\r\ndef \t\t\tUpperCamelCase\t\t\t\t\t( __lowerCamelCase : Dict\t\t\t\t,\t\t\t\t\t__lowerCamelCase : int\t\t\t\t,\t\t\t\t\t__lowerCamelCase : Dict\t\t\t\t,\t\t\t\t\t__lowerCamelCase : Any ):\r\n if isinstance(__lowerCamelCase\t\t\t\t,\t\t\t\t\t__lowerCamelCase ):\r\n snake_case : int\t\t\t\t\t\t\t= np.full((len(__lowerCamelCase ), sequence_length, 2)\t\t\t\t,\t\t\t\t\t__lowerCamelCase )\r\n else:\r\n snake_case : List[Any]\t\t\t\t\t\t\t= np.full((len(__lowerCamelCase ), sequence_length)\t\t\t\t,\t\t\t\t\t__lowerCamelCase )\r\n\r\n for i, tensor in enumerate(__lowerCamelCase ):\r\n if padding_side == \"right\":\r\n if isinstance(__lowerCamelCase\t\t\t\t,\t\t\t\t\t__lowerCamelCase ):\r\n snake_case : Dict\t\t\t\t\t\t\t= tensor[:sequence_length]\r\n else:\r\n snake_case : Tuple\t\t\t\t\t\t\t= tensor[:sequence_length]\r\n else:\r\n if isinstance(__lowerCamelCase\t\t\t\t,\t\t\t\t\t__lowerCamelCase ):\r\n snake_case : str\t\t\t\t\t\t\t= tensor[:sequence_length]\r\n else:\r\n snake_case : Optional[int]\t\t\t\t\t\t\t= tensor[:sequence_length]\r\n\r\n return out_tensor.tolist()\r\n\r\n\r\ndef \t\t\tUpperCamelCase\t\t\t\t\t( __lowerCamelCase : 
List[Any] ):\r\n snake_case : Any\t\t\t\t\t\t\t= ord(__lowerCamelCase )\r\n if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):\r\n return True\r\n snake_case : List[str]\t\t\t\t\t\t\t= unicodedata.category(__lowerCamelCase )\r\n if cat.startswith(\"P\" ):\r\n return True\r\n return False\r\n\r\n\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass UpperCAmelCase\t\t(\t\t\t\t\t\tA_ ):\r\n A__\t\t\t\t\t:\t\tPreTrainedTokenizerBase\r\n A__\t\t\t\t\t:\t\tUnion[bool, str, PaddingStrategy]\t\t\t\t\t\t = True\r\n A__\t\t\t\t\t:\t\tOptional[int]\t\t\t\t\t\t = None\r\n A__\t\t\t\t\t:\t\tOptional[int]\t\t\t\t\t\t = None\r\n A__\t\t\t\t\t:\t\tint\t\t\t\t\t\t = -1_00\r\n A__\t\t\t\t\t:\t\tstr\t\t\t\t\t\t = \"pt\"\r\n\r\n\r\n\r\n def \t\t\t\t_SCREAMING_SNAKE_CASE (self\t\t:\t\t\t\t\tTuple ,\t\t\t\t\t\tsnake_case__\t\t:\t\t\t\t\tTuple )\t\t->\tint:\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n import torch\r\n\r\n snake_case : Optional[Any]\t\t\t\t\t\t\t= \"label\" if \"label\" in features[0].keys() else \"labels\"\r\n snake_case : str\t\t\t\t\t\t\t= [feature[label_name] for feature in features] if label_name in features[0].keys() else None\r\n snake_case : List[Any]\t\t\t\t\t\t\t= self.tokenizer.pad(\r\n snake_case__ ,\t\t\t\t\t\tpadding=self.padding ,\t\t\t\t\t\tmax_length=self.max_length ,\t\t\t\t\t\tpad_to_multiple_of=self.pad_to_multiple_of ,\t\t\t\t\t\treturn_tensors=\"pt\" if labels is None else None ,\t\t\t\t\t\t)\r\n\r\n if labels is None:\r\n return batch\r\n\r\n snake_case : int\t\t\t\t\t\t\t= torch.tensor(batch[\"entity_ids\"] ).shape[1]\r\n snake_case : List[Any]\t\t\t\t\t\t\t= self.tokenizer.padding_side\r\n if padding_side == \"right\":\r\n snake_case : Tuple\t\t\t\t\t\t\t= [\r\n list(snake_case__ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case__ )) for label in labels\r\n ]\r\n else:\r\n snake_case : str\t\t\t\t\t\t\t= [\r\n [self.label_pad_token_id] * (sequence_length - len(snake_case__ )) + list(snake_case__ ) for label in labels\r\n ]\r\n\r\n snake_case : int\t\t\t\t\t\t\t= [feature[\"ner_tags\"] for feature in features]\r\n snake_case : int\t\t\t\t\t\t\t= padding_tensor(snake_case__ ,\t\t\t\t\t\t-1 ,\t\t\t\t\t\tsnake_case__ ,\t\t\t\t\t\tsnake_case__ )\r\n snake_case : str\t\t\t\t\t\t\t= [feature[\"original_entity_spans\"] for feature in features]\r\n snake_case : Dict\t\t\t\t\t\t\t= padding_tensor(snake_case__ ,\t\t\t\t\t\t(-1, -1) ,\t\t\t\t\t\tsnake_case__ ,\t\t\t\t\t\tsnake_case__ )\r\n snake_case : Dict\t\t\t\t\t\t\t= {k: torch.tensor(snake_case__ ,\t\t\t\t\t\tdtype=torch.intaa ) for k, v in batch.items()}\r\n\r\n return batch\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":59,"string":"59"},"style_context":{"kind":"string","value":"\r\r\r\r\r\r\"\"\"simple docstring\"\"\"\r\r\r\r\r\rfrom __future__ import annotations\r\r\rdef \t\t\tUpperCAmelCase__\t\t\t\t\t\t\t( SCREAMING_SNAKE_CASE :\t\t\t\t\t\tstr\t\t\t):\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r return [ord(SCREAMING_SNAKE_CASE\t\t\t) - 96 for elem in plain]\r\r\rdef \t\t\tUpperCAmelCase__\t\t\t\t\t\t\t( SCREAMING_SNAKE_CASE :\t\t\t\t\t\tlist[int]\t\t\t):\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r return \"\".join(chr(elem + 96\t\t\t) for elem in encoded\t\t\t)\r\r\rdef \t\t\tUpperCAmelCase__\t\t\t\t\t\t\t( ):\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r lowerCAmelCase \t\t\t\t\t\t= encode(input(\"\"\"-> \"\"\"\t\t\t).strip().lower()\t\t\t)\r print(\"\"\"Encoded: \"\"\"\t\t\t\t\t\t\t,\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t)\r 
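 # For example, entering "hello" above yields Encoded: [8, 5, 12, 12, 15],
 # since each letter maps to ord(letter) - 96; decode() reverses this with
 # chr(n + 96) to recover "hello".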
print(\"\"\"Decoded:\"\"\"\t\t\t\t\t\t\t,\t\t\t\t\t\tdecode(SCREAMING_SNAKE_CASE\t\t\t)\t\t\t)\r\r\rif __name__ == \"__main__\":\r main()\r\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":46,"string":"46"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":779,"cells":{"code":{"kind":"string","value":"'''simple docstring'''\r\r\r\r\rimport io\rimport json\r\rimport fsspec\rimport pytest\r\rfrom datasets import Dataset, DatasetDict, Features, NamedSplit, Value\rfrom datasets.io.json import JsonDatasetReader, JsonDatasetWriter\r\rfrom ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases\r\r\r\r\r\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Optional[int], _UpperCamelCase : Tuple ) ->\t\t\t\tDict:\r assert isinstance(_UpperCamelCase, _UpperCamelCase )\r assert dataset.num_rows == 4\r assert dataset.num_columns == 3\r assert dataset.column_names == [\"col_1\", \"col_2\", \"col_3\"]\r for feature, expected_dtype in expected_features.items():\r assert dataset.features[feature].dtype == expected_dtype\r\r\r\r\r\r@pytest.mark.parametrize('''keep_in_memory''', [False, True] )\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Any, _UpperCamelCase : int, _UpperCamelCase : List[str] ) ->\t\t\t\tDict:\r A_ = tmp_path / '''cache'''\r A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}\r with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():\r A_ = JsonDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase, keep_in_memory=_UpperCamelCase ).read()\r _check_json_dataset(_UpperCamelCase, _UpperCamelCase )\r\r\r\r\r\r@pytest.mark.parametrize(\r '''features''', [\r None,\r {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},\r {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},\r {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},\r {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},\r ], )\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Dict, _UpperCamelCase : Optional[int], _UpperCamelCase : int ) ->\t\t\t\tDict:\r A_ = tmp_path / '''cache'''\r A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}\r A_ = features.copy() if features else default_expected_features\r A_ = (\r Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None\r )\r A_ = JsonDatasetReader(_UpperCamelCase, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read()\r _check_json_dataset(_UpperCamelCase, _UpperCamelCase )\r\r\r\r\r\r@pytest.mark.parametrize(\r '''features''', [\r None,\r {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},\r ], )\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Dict, _UpperCamelCase : Dict, _UpperCamelCase : Optional[int] ) ->\t\t\t\tList[str]:\r A_ = tmp_path / '''cache'''\r A_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}\r A_ = features.copy() if features else default_expected_features\r A_ = (\r Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None\r )\r A_ = JsonDatasetReader(_UpperCamelCase, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read()\r assert isinstance(_UpperCamelCase, _UpperCamelCase )\r assert dataset.num_rows == 2\r assert 
dataset.num_columns == 3\r assert dataset.column_names == [\"col_3\", \"col_1\", \"col_2\"]\r for feature, expected_dtype in expected_features.items():\r assert dataset.features[feature].dtype == expected_dtype\r\r\r\r\r\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : List[Any], _UpperCamelCase : Union[str, Any] ) ->\t\t\t\tUnion[str, Any]:\r # jsonl_312_path features are {\"col_3\": \"float64\", \"col_1\": \"string\", \"col_2\": \"int64\"}\r A_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}\r A_ = features.copy()\r A_ = (\r Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None\r )\r A_ = tmp_path / '''cache'''\r A_ = JsonDatasetReader(_UpperCamelCase, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read()\r assert isinstance(_UpperCamelCase, _UpperCamelCase )\r assert dataset.num_rows == 2\r assert dataset.num_columns == 3\r assert dataset.column_names == [\"col_2\", \"col_3\", \"col_1\"]\r for feature, expected_dtype in expected_features.items():\r assert dataset.features[feature].dtype == expected_dtype\r\r\r\r\r\r@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Tuple, _UpperCamelCase : Union[str, Any], _UpperCamelCase : Optional[Any] ) ->\t\t\t\tstr:\r A_ = tmp_path / '''cache'''\r A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}\r A_ = JsonDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase, split=_UpperCamelCase ).read()\r _check_json_dataset(_UpperCamelCase, _UpperCamelCase )\r assert dataset.split == split if split else \"train\"\r\r\r\r\r\r@pytest.mark.parametrize('''path_type''', [str, list] )\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : int, _UpperCamelCase : Union[str, Any], _UpperCamelCase : str ) ->\t\t\t\tUnion[str, Any]:\r if issubclass(_UpperCamelCase, _UpperCamelCase ):\r A_ = jsonl_path\r elif issubclass(_UpperCamelCase, _UpperCamelCase ):\r A_ = [jsonl_path]\r A_ = tmp_path / '''cache'''\r A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}\r A_ = JsonDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase ).read()\r _check_json_dataset(_UpperCamelCase, _UpperCamelCase )\r\r\r\r\r\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Tuple, _UpperCamelCase : Union[str, Any], _UpperCamelCase : str=(\"train\",) ) ->\t\t\t\tint:\r assert isinstance(_UpperCamelCase, _UpperCamelCase )\r for split in splits:\r A_ = dataset_dict[split]\r assert dataset.num_rows == 4\r assert dataset.num_columns == 3\r assert dataset.column_names == [\"col_1\", \"col_2\", \"col_3\"]\r for feature, expected_dtype in expected_features.items():\r assert dataset.features[feature].dtype == expected_dtype\r\r\r\r\r\r@pytest.mark.parametrize('''keep_in_memory''', [False, True] )\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : List[str], _UpperCamelCase : Union[str, Any], _UpperCamelCase : int ) ->\t\t\t\tUnion[str, Any]:\r A_ = tmp_path / '''cache'''\r A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}\r with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():\r A_ = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=_UpperCamelCase, keep_in_memory=_UpperCamelCase ).read()\r _check_json_datasetdict(_UpperCamelCase, _UpperCamelCase 
)\r\r\r\r\r\r@pytest.mark.parametrize(\r '''features''', [\r None,\r {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},\r {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},\r {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},\r {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},\r ], )\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Tuple, _UpperCamelCase : int, _UpperCamelCase : Optional[Any] ) ->\t\t\t\tstr:\r A_ = tmp_path / '''cache'''\r A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}\r A_ = features.copy() if features else default_expected_features\r A_ = (\r Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None\r )\r A_ = JsonDatasetReader({'''train''': jsonl_path}, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read()\r _check_json_datasetdict(_UpperCamelCase, _UpperCamelCase )\r\r\r\r\r\r@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Union[str, Any], _UpperCamelCase : List[str], _UpperCamelCase : Optional[int] ) ->\t\t\t\tTuple:\r if split:\r A_ = {split: jsonl_path}\r else:\r A_ = '''train'''\r A_ = {'''train''': jsonl_path, '''test''': jsonl_path}\r A_ = tmp_path / '''cache'''\r A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}\r A_ = JsonDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase ).read()\r _check_json_datasetdict(_UpperCamelCase, _UpperCamelCase, splits=list(path.keys() ) )\r assert all(dataset[split].split == split for split in path.keys() )\r\r\r\r\r\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : List[str] ) ->\t\t\t\tOptional[Any]:\r return json.load(_UpperCamelCase )\r\r\r\r\r\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Dict ) ->\t\t\t\tTuple:\r return [json.loads(_UpperCamelCase ) for line in buffer]\r\r\r\r\rclass __UpperCAmelCase :\r\r\r\r\r\r '''simple docstring'''\r\r\r\r @pytest.mark.parametrize('''lines, load_json_function''' ,\t\t\t\t\t[(True, load_json_lines), (False, load_json)]\t\t\t)\r def __A\t(\t\t\t\tself ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\t\t->\t\t\t\t\tList[Any]:\r with io.BytesIO() as buffer:\r JsonDatasetWriter(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\tlines=_SCREAMING_SNAKE_CASE\t\t\t).write()\r buffer.seek(0\t\t\t)\r A_ = load_json_function(_SCREAMING_SNAKE_CASE\t\t\t)\r assert isinstance(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\r assert isinstance(exported_content[0] ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\r assert len(_SCREAMING_SNAKE_CASE\t\t\t) == 10\r\r @pytest.mark.parametrize(\r '''orient, container, keys, len_at''' ,\t\t\t\t\t[\r ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),\r ('''split''', dict, {'''columns''', '''data'''}, '''data'''),\r ('''index''', dict, set('''0123456789'''\t\t\t), None),\r ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),\r ('''values''', list, None, None),\r ('''table''', dict, {'''schema''', '''data'''}, '''data'''),\r ] ,\t\t\t\t\t)\r def __A\t(\t\t\t\tself ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE 
,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\t\t->\t\t\t\t\tstr:\r with io.BytesIO() as buffer:\r JsonDatasetWriter(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\tlines=_SCREAMING_SNAKE_CASE ,\t\t\t\t\torient=_SCREAMING_SNAKE_CASE\t\t\t).write()\r buffer.seek(0\t\t\t)\r A_ = load_json(_SCREAMING_SNAKE_CASE\t\t\t)\r assert isinstance(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\r if keys:\r if container is dict:\r assert exported_content.keys() == keys\r else:\r assert exported_content[0].keys() == keys\r else:\r assert not hasattr(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t'''keys'''\t\t\t) and not hasattr(exported_content[0] ,\t\t\t\t\t'''keys'''\t\t\t)\r if len_at:\r assert len(exported_content[len_at]\t\t\t) == 10\r else:\r assert len(_SCREAMING_SNAKE_CASE\t\t\t) == 10\r\r @pytest.mark.parametrize('''lines, load_json_function''' ,\t\t\t\t\t[(True, load_json_lines), (False, load_json)]\t\t\t)\r def __A\t(\t\t\t\tself ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\t\t->\t\t\t\t\tOptional[int]:\r with io.BytesIO() as buffer:\r JsonDatasetWriter(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\tlines=_SCREAMING_SNAKE_CASE ,\t\t\t\t\tnum_proc=2\t\t\t).write()\r buffer.seek(0\t\t\t)\r A_ = load_json_function(_SCREAMING_SNAKE_CASE\t\t\t)\r assert isinstance(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\r assert isinstance(exported_content[0] ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\r assert len(_SCREAMING_SNAKE_CASE\t\t\t) == 10\r\r @pytest.mark.parametrize(\r '''orient, container, keys, len_at''' ,\t\t\t\t\t[\r ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),\r ('''split''', dict, {'''columns''', '''data'''}, '''data'''),\r ('''index''', dict, set('''0123456789'''\t\t\t), None),\r ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),\r ('''values''', list, None, None),\r ('''table''', dict, {'''schema''', '''data'''}, '''data'''),\r ] ,\t\t\t\t\t)\r def __A\t(\t\t\t\tself ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\t\t->\t\t\t\t\tstr:\r with io.BytesIO() as buffer:\r JsonDatasetWriter(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\tlines=_SCREAMING_SNAKE_CASE ,\t\t\t\t\torient=_SCREAMING_SNAKE_CASE ,\t\t\t\t\tnum_proc=2\t\t\t).write()\r buffer.seek(0\t\t\t)\r A_ = load_json(_SCREAMING_SNAKE_CASE\t\t\t)\r assert isinstance(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\r if keys:\r if container is dict:\r assert exported_content.keys() == keys\r else:\r assert exported_content[0].keys() == keys\r else:\r assert not hasattr(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t'''keys'''\t\t\t) and not hasattr(exported_content[0] ,\t\t\t\t\t'''keys'''\t\t\t)\r if len_at:\r assert len(exported_content[len_at]\t\t\t) == 10\r else:\r assert len(_SCREAMING_SNAKE_CASE\t\t\t) == 10\r\r def __A\t(\t\t\t\tself ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\t\t->\t\t\t\t\tstr:\r with pytest.raises(_SCREAMING_SNAKE_CASE\t\t\t):\r with io.BytesIO() as buffer:\r JsonDatasetWriter(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\tnum_proc=0\t\t\t)\r\r @pytest.mark.parametrize('''compression, extension''' ,\t\t\t\t\t[('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')]\t\t\t)\r def __A\t(\t\t\t\tself ,\t\t\t\t\t_SCREAMING_SNAKE_CASE 
,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\t\t->\t\t\t\t\tOptional[int]:\r A_ = tmp_path_factory.mktemp('''data'''\t\t\t) / F'''test.json.{extension}'''\r A_ = str(shared_datadir / F'''test_file.json.{extension}'''\t\t\t)\r JsonDatasetWriter(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE ,\t\t\t\t\tcompression=_SCREAMING_SNAKE_CASE\t\t\t).write()\r\r with fsspec.open(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t'''rb''' ,\t\t\t\t\tcompression='''infer'''\t\t\t) as f:\r A_ = f.read()\r with fsspec.open(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t'''rb''' ,\t\t\t\t\tcompression='''infer'''\t\t\t) as f:\r A_ = f.read()\r assert exported_content == original_content\r\r\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":18,"string":"18"},"style_context":{"kind":"string","value":"'''simple docstring'''\r\r\r\r\rimport tempfile\rimport unittest\r\rimport numpy as np\rfrom huggingface_hub import HfFolder, delete_repo\rfrom requests.exceptions import HTTPError\r\rfrom transformers import BertConfig, is_flax_available\rfrom transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax\r\r\rif is_flax_available():\r import os\r\r from flax.core.frozen_dict import unfreeze\r from flax.traverse_util import flatten_dict\r\r from transformers import FlaxBertModel\r\r __snake_case :\t\t\t\tstr\t\t\t\t\t\t = '0.12' # assumed parallelism: 8\r\r\r\r\r@require_flax\r@is_staging_test\rclass __UpperCAmelCase ( unittest.TestCase\t\t\t\t\t):\r\r\r\r\r\r '''simple docstring'''\r\r\r\r @classmethod\r def __A\t(\t\t\t\tcls\t\t\t)\t\t->\t\t\t\t\tDict:\r A_ = TOKEN\r HfFolder.save_token(_SCREAMING_SNAKE_CASE\t\t\t)\r\r @classmethod\r def __A\t(\t\t\t\tcls\t\t\t)\t\t->\t\t\t\t\tOptional[int]:\r try:\r delete_repo(token=cls._token ,\t\t\t\t\trepo_id='''test-model-flax'''\t\t\t)\r except HTTPError:\r pass\r\r try:\r delete_repo(token=cls._token ,\t\t\t\t\trepo_id='''valid_org/test-model-flax-org'''\t\t\t)\r except HTTPError:\r pass\r\r def __A\t(\t\t\t\tself\t\t\t)\t\t->\t\t\t\t\tstr:\r A_ = BertConfig(\r vocab_size=99 ,\t\t\t\t\thidden_size=32 ,\t\t\t\t\tnum_hidden_layers=5 ,\t\t\t\t\tnum_attention_heads=4 ,\t\t\t\t\tintermediate_size=37\t\t\t)\r A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE\t\t\t)\r model.push_to_hub('''test-model-flax''' ,\t\t\t\t\tuse_auth_token=self._token\t\t\t)\r\r A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax'''\t\t\t)\r\r A_ = flatten_dict(unfreeze(model.params\t\t\t)\t\t\t)\r A_ = flatten_dict(unfreeze(new_model.params\t\t\t)\t\t\t)\r\r for key in base_params.keys():\r A_ = (base_params[key] - new_params[key]).sum().item()\r self.assertLessEqual(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t1E-3 ,\t\t\t\t\tmsg=F'''{key} not identical'''\t\t\t)\r\r # Reset repo\r delete_repo(token=self._token ,\t\t\t\t\trepo_id='''test-model-flax'''\t\t\t)\r\r # Push to hub via save_pretrained\r with tempfile.TemporaryDirectory() as tmp_dir:\r model.save_pretrained(_SCREAMING_SNAKE_CASE ,\t\t\t\t\trepo_id='''test-model-flax''' ,\t\t\t\t\tpush_to_hub=_SCREAMING_SNAKE_CASE ,\t\t\t\t\tuse_auth_token=self._token\t\t\t)\r\r A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax'''\t\t\t)\r\r A_ = flatten_dict(unfreeze(model.params\t\t\t)\t\t\t)\r A_ = flatten_dict(unfreeze(new_model.params\t\t\t)\t\t\t)\r\r for key in base_params.keys():\r A_ = (base_params[key] - new_params[key]).sum().item()\r self.assertLessEqual(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t1E-3 ,\t\t\t\t\tmsg=F'''{key} not identical'''\t\t\t)\r\r def 
__A\t(\t\t\t\tself\t\t\t)\t\t->\t\t\t\t\tList[str]:\r A_ = BertConfig(\r vocab_size=99 ,\t\t\t\t\thidden_size=32 ,\t\t\t\t\tnum_hidden_layers=5 ,\t\t\t\t\tnum_attention_heads=4 ,\t\t\t\t\tintermediate_size=37\t\t\t)\r A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE\t\t\t)\r model.push_to_hub('''valid_org/test-model-flax-org''' ,\t\t\t\t\tuse_auth_token=self._token\t\t\t)\r\r A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org'''\t\t\t)\r\r A_ = flatten_dict(unfreeze(model.params\t\t\t)\t\t\t)\r A_ = flatten_dict(unfreeze(new_model.params\t\t\t)\t\t\t)\r\r for key in base_params.keys():\r A_ = (base_params[key] - new_params[key]).sum().item()\r self.assertLessEqual(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t1E-3 ,\t\t\t\t\tmsg=F'''{key} not identical'''\t\t\t)\r\r # Reset repo\r delete_repo(token=self._token ,\t\t\t\t\trepo_id='''valid_org/test-model-flax-org'''\t\t\t)\r\r # Push to hub via save_pretrained\r with tempfile.TemporaryDirectory() as tmp_dir:\r model.save_pretrained(\r _SCREAMING_SNAKE_CASE ,\t\t\t\t\trepo_id='''valid_org/test-model-flax-org''' ,\t\t\t\t\tpush_to_hub=_SCREAMING_SNAKE_CASE ,\t\t\t\t\tuse_auth_token=self._token\t\t\t)\r\r A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org'''\t\t\t)\r\r A_ = flatten_dict(unfreeze(model.params\t\t\t)\t\t\t)\r A_ = flatten_dict(unfreeze(new_model.params\t\t\t)\t\t\t)\r\r for key in base_params.keys():\r A_ = (base_params[key] - new_params[key]).sum().item()\r self.assertLessEqual(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t1E-3 ,\t\t\t\t\tmsg=F'''{key} not identical'''\t\t\t)\r\r\r\r\r\rdef \t\t\t\t\t_UpperCAmelCase (\t\t\t\t\t\t\t_UpperCamelCase : Union[str, Any], _UpperCamelCase : Tuple ) ->\t\t\t\tDict:\r A_ = True\r A_ = flatten_dict(modela.params )\r A_ = flatten_dict(modela.params )\r for key in flat_params_a.keys():\r if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:\r A_ = False\r\r return models_are_equal\r\r\r\r\r@require_flax\rclass __UpperCAmelCase ( unittest.TestCase\t\t\t\t\t):\r\r\r\r\r\r '''simple docstring'''\r\r\r\r def __A\t(\t\t\t\tself\t\t\t)\t\t->\t\t\t\t\tList[str]:\r A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only'''\t\t\t)\r A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE\t\t\t)\r\r A_ = '''bert'''\r with tempfile.TemporaryDirectory() as tmp_dir:\r model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\t\t\t)\r\r with self.assertRaises(_SCREAMING_SNAKE_CASE\t\t\t):\r A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE\t\t\t)\r\r A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ,\t\t\t\t\tsubfolder=_SCREAMING_SNAKE_CASE\t\t\t)\r\r self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\t\t\t)\r\r def __A\t(\t\t\t\tself\t\t\t)\t\t->\t\t\t\t\tList[Any]:\r A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only'''\t\t\t)\r A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE\t\t\t)\r\r A_ = '''bert'''\r with tempfile.TemporaryDirectory() as tmp_dir:\r model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t) ,\t\t\t\t\tmax_shard_size='''10KB'''\t\t\t)\r\r with self.assertRaises(_SCREAMING_SNAKE_CASE\t\t\t):\r A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE\t\t\t)\r\r A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ,\t\t\t\t\tsubfolder=_SCREAMING_SNAKE_CASE\t\t\t)\r\r self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE ,\t\t\t\t\t_SCREAMING_SNAKE_CASE\t\t\t)\t\t\t)\r\r def 
__A\t(\t\t\t\tself\t\t\t)\t\t->\t\t\t\t\tDict:\r A_ = '''bert'''\r A_ = '''hf-internal-testing/tiny-random-bert-subfolder'''\r\r with self.assertRaises(_SCREAMING_SNAKE_CASE\t\t\t):\r A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE\t\t\t)\r\r A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ,\t\t\t\t\tsubfolder=_SCREAMING_SNAKE_CASE\t\t\t)\r\r self.assertIsNotNone(_SCREAMING_SNAKE_CASE\t\t\t)\r\r def __A\t(\t\t\t\tself\t\t\t)\t\t->\t\t\t\t\tOptional[Any]:\r A_ = '''bert'''\r A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''\r with self.assertRaises(_SCREAMING_SNAKE_CASE\t\t\t):\r A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE\t\t\t)\r\r A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ,\t\t\t\t\tsubfolder=_SCREAMING_SNAKE_CASE\t\t\t)\r\r self.assertIsNotNone(_SCREAMING_SNAKE_CASE\t\t\t)\r\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":18,"string":"18"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":780,"cells":{"code":{"kind":"string","value":"\n\n\n'''simple docstring'''\n\n\nimport os\nfrom shutil import copyfile\nfrom typing import List, Optional, Tuple\n\nimport sentencepiece as spm\n\nfrom ...tokenization_utils import PreTrainedTokenizer\nfrom ...utils import logging\n\n\nUpperCAmelCase_\t\t: str\t\t\t\t\t\t\t =\t\tlogging.get_logger(__name__)\n\nUpperCAmelCase_\t\t: Optional[int]\t\t\t\t\t\t\t =\t\t{'vocab_file': 'sentencepiece.model'}\n\nUpperCAmelCase_\t\t: str\t\t\t\t\t\t\t =\t\t{\n 'vocab_file': {\n 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',\n },\n}\n\nUpperCAmelCase_\t\t: Union[str, Any]\t\t\t\t\t\t\t =\t\t{\n 'google/rembert': 256,\n}\n\n\n\n\n\nclass lowercase__ (\t\t\t\t\t\t_snake_case\t\t\t\t\t):\n\n\n\n\n\n\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\t\t\t\t\t\t\tA_\t\t\t\t\t\t\t: List[str] \t= VOCAB_FILES_NAMES\n\t\t\t\t\t\t\tA_\t\t\t\t\t\t\t: Optional[int] \t= PRETRAINED_VOCAB_FILES_MAP\n\t\t\t\t\t\t\tA_\t\t\t\t\t\t\t: Union[str, Any] \t= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef __init__( self ,\t\t\t\t\t__snake_case ,\t\t\t\t\t__snake_case=False ,\t\t\t\t\t__snake_case=True ,\t\t\t\t\t__snake_case=True ,\t\t\t\t\t__snake_case=\"[CLS]\" ,\t\t\t\t\t__snake_case=\"[SEP]\" ,\t\t\t\t\t__snake_case=\"[UNK]\" ,\t\t\t\t\t__snake_case=\"[SEP]\" ,\t\t\t\t\t__snake_case=\"[PAD]\" ,\t\t\t\t\t__snake_case=\"[CLS]\" ,\t\t\t\t\t__snake_case=\"[MASK]\" ,\t\t\t\t\t**__snake_case ,\t\t\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\tsuper().__init__(\n\t\t\t\t\t\t\t\t\t\t\t do_lower_case=__snake_case ,\t\t\t\t\tremove_space=__snake_case ,\t\t\t\t\tkeep_accents=__snake_case ,\t\t\t\t\tbos_token=__snake_case ,\t\t\t\t\teos_token=__snake_case ,\t\t\t\t\tunk_token=__snake_case ,\t\t\t\t\tsep_token=__snake_case ,\t\t\t\t\tpad_token=__snake_case ,\t\t\t\t\tcls_token=__snake_case ,\t\t\t\t\tmask_token=__snake_case ,\t\t\t\t\t**__snake_case ,\t\t\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : List[str] \t\t= do_lower_case\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : Optional[int] \t\t= remove_space\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : List[str] \t\t= keep_accents\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : Optional[int] \t\t= vocab_file\n\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : List[str] \t\t= spm.SentencePieceProcessor()\n\t\t\t\t\t\t\t\t\t\t\tself.sp_model.Load(__snake_case )\n\n\n\n\n\n\n\t\t\t\t\t\t\t@property\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self ):\n\t\t\t\t\t\t\t\t\t\t\treturn len(self.sp_model 
)\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self ):\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : int \t\t= {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}\n\t\t\t\t\t\t\t\t\t\t\tvocab.update(self.added_tokens_encoder )\n\t\t\t\t\t\t\t\t\t\t\treturn vocab\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef __getstate__( self ):\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : List[str] \t\t= self.__dict__.copy()\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : Any \t\t= None\n\t\t\t\t\t\t\t\t\t\t\treturn state\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef __setstate__( self ,\t\t\t\t\t__snake_case ):\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : List[str] \t\t= d\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : int \t\t= spm.SentencePieceProcessor()\n\t\t\t\t\t\t\t\t\t\t\tself.sp_model.Load(self.vocab_file )\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self ,\t\t\t\t\t__snake_case ,\t\t\t\t\t__snake_case=False ):\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : Union[str, Any] \t\t= self.sp_model.EncodeAsPieces(__snake_case )\n\t\t\t\t\t\t\t\t\t\t\treturn pieces\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self ,\t\t\t\t\t__snake_case ):\n\t\t\t\t\t\t\t\t\t\t\treturn self.sp_model.PieceToId(__snake_case )\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self ,\t\t\t\t\t__snake_case ):\n\t\t\t\t\t\t\t\t\t\t\treturn self.sp_model.IdToPiece(__snake_case )\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self ,\t\t\t\t\t__snake_case ):\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : Optional[Any] \t\t= self.sp_model.decode_pieces(__snake_case )\n\t\t\t\t\t\t\t\t\t\t\treturn out_string\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self ,\t\t\t\t\t__snake_case ,\t\t\t\t\t__snake_case = None ):\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : List[str] \t\t= [self.sep_token_id]\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : Dict \t\t= [self.cls_token_id]\n\t\t\t\t\t\t\t\t\t\t\tif token_ids_a is None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn cls + token_ids_a + sep\n\t\t\t\t\t\t\t\t\t\t\treturn cls + token_ids_a + sep + token_ids_a + sep\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self ,\t\t\t\t\t__snake_case ,\t\t\t\t\t__snake_case = None ,\t\t\t\t\t__snake_case = False ):\n\n\t\t\t\t\t\t\t\t\t\t\tif already_has_special_tokens:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif token_ids_a is not None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"You should not supply a second sequence if the provided sequence of \"\"\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"ids is already formatted with special tokens for the model.\"\"\" )\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]\n\n\t\t\t\t\t\t\t\t\t\t\tif token_ids_a is not None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1]\n\t\t\t\t\t\t\t\t\t\t\treturn [1] + ([0] * len(__snake_case )) + [1]\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self ,\t\t\t\t\t__snake_case ,\t\t\t\t\t__snake_case = None ):\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : Optional[int] \t\t= [self.sep_token_id]\n\t\t\t\t\t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE : Union[str, Any] \t\t= [self.cls_token_id]\n\n\t\t\t\t\t\t\t\t\t\t\tif token_ids_a is None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn len(cls + token_ids_a + sep ) * [0]\n\t\t\t\t\t\t\t\t\t\t\treturn len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]\n\n\n\n\n\n\n\t\t\t\t\t\t\tdef UpperCAmelCase_\t( self 
, save_directory, filename_prefix=None):
        # Copy the sentencepiece vocabulary file into `save_directory`.
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)


def optimal_merge_pattern(files: list) -> float:
    """Repeatedly merge the two cheapest files and return the total merge cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()


import torch


def main() -> None:
    # Report how many CUDA devices are visible to this process.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
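# A quick, illustrative check of optimal_merge_pattern defined above; the
# sample costs are made up. Merging [2, 3, 4] combines 2+3 first (cost 5),
# then 5+4 (cost 9), for a total cost of 14.
if __name__ == "__main__":
    assert optimal_merge_pattern([2, 3, 4]) == 14
    print("optimal_merge_pattern sanity check passed")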
from manim import *


class Stage1(Scene):
    """Animation for the first stage of big-model loading: an empty model
    skeleton is created in memory."""

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu, run_time=1),
            Create(gpu, run_time=1),
            Create(model, run_time=1),
        )

        step_1 = MarkupText(
            "First, an empty model skeleton is loaded\ninto memory without using much RAM.",
            font_size=24,
        )

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            "Key:\n\n Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            # Small highlight squares that fly from the model onto the CPU.
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)

        self.wait()
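# Rendering sketch for the scene above (assumed standard manim CLI usage; the
# module filename `big_model_loading.py` is hypothetical):
#
#   manim -pql big_model_loading.py Stage1
#
# where -p previews the output when rendering finishes and -ql selects the
# fast low-quality profile.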
LEFT\t\t\t\t\t\t\t),\t\tbuff=0.02,\t\tdirection=__magic_name__\t\t\t\t\t\t\t)\r\n cpu_target.target.set_x(cpu_target.target.get_x() + 0.1\t\t\t\t\t\t\t)\r\n elif i == 3:\r\n cpu_target.target.next_to(cpu_targs[0].target,\t\tdirection=__magic_name__,\t\tbuff=0.0\t\t\t\t\t\t\t)\r\n else:\r\n cpu_target.target.next_to(cpu_targs[i - 1].target,\t\tdirection=__magic_name__,\t\tbuff=0.0\t\t\t\t\t\t\t)\r\n cpu_targs.append(__magic_name__\t\t\t\t\t\t\t)\r\n\r\n first_animations.append(rect.animate(run_time=0.5\t\t\t\t\t\t\t).set_stroke(__magic_name__\t\t\t\t\t\t\t)\t\t\t\t\t\t\t)\r\n second_animations.append(MoveToTarget(__magic_name__,\t\trun_time=1.5\t\t\t\t\t\t\t)\t\t\t\t\t\t\t)\r\n\r\n self.play(*__magic_name__\t\t\t\t\t\t\t)\r\n self.play(*__magic_name__\t\t\t\t\t\t\t)\r\n\r\n self.wait()\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":247,"string":"247"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":782,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef a_ (\t\t\t\t\t_A = 600851475143 )\t->\tint:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t\t\t\t\t\t\t\t\t= int(_A )\r\n\t\t\t\texcept (TypeError, ValueError):\r\n\t\t\t\t\t\t\t\traise TypeError('Parameter n must be int or castable to int.' )\r\n\t\t\t\tif n <= 0:\r\n\t\t\t\t\t\t\t\traise ValueError('Parameter n must be greater than or equal to one.' )\r\n\t\t\t\tsnake_case__\t\t\t\t\t\t\t\t\t\t\t\t\t= 2\r\n\t\t\t\tsnake_case__\t\t\t\t\t\t\t\t\t\t\t\t\t= 0\r\n\t\t\t\tif n == 2:\r\n\t\t\t\t\t\t\t\treturn 2\r\n\t\t\t\twhile n > 2:\r\n\t\t\t\t\t\t\t\twhile n % i != 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\ti += 1\r\n\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t\t\t\t\t\t\t\t\t= i\r\n\t\t\t\t\t\t\t\twhile n % i == 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t\t\t\t\t\t\t\t\t= n // i\r\n\t\t\t\t\t\t\t\ti += 1\r\n\t\t\t\treturn int(_A )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t\t\t\tprint(f'''{solution() = }''')\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":307,"string":"307"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef a_ (\t\t\t\t\t_A\t\t\t\t\t, _A )\t->\tint:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\t\t\t\treturn 1 if input_a == input_a else 0\r\ndef a_ (\t\t\t\t\t)\t->\tNone:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\t\t\t\tassert xnor_gate(0\t\t\t\t\t, 0 ) == 1\r\n\t\t\t\tassert xnor_gate(0\t\t\t\t\t, 1 ) == 0\r\n\t\t\t\tassert xnor_gate(1\t\t\t\t\t, 0 ) == 0\r\n\t\t\t\tassert xnor_gate(1\t\t\t\t\t, 1 ) == 1\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t\t\t\tprint(xnor_gate(0, 0))\r\n\t\t\t\t\t\t\tprint(xnor_gate(0, 1))\r\n\t\t\t\t\t\t\tprint(xnor_gate(1, 0))\r\n\t\t\t\t\t\t\tprint(xnor_gate(1, 1))\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":307,"string":"307"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":783,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nlowercase__\t\t: Dict\t\t\t\t\t\t = 1_0\r\n\r\ndef UpperCamelCase_ ( lowerCAmelCase__\t\t\t\t: list[int]\t\t\t\t\t\t)\t\t\t\t\t\t-> list[int]:\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n lowerCAmelCase_ : str\t\t\t\t\t\t\t\t=\t\t\t1\r\n lowerCAmelCase_ : Optional[Any]\t\t\t\t\t\t\t\t=\t\t\tmax(lowerCAmelCase__\t\t\t\t\t\t)\r\n while placement <= max_digit:\r\n # declare and 
initialize empty buckets\r\n lowerCAmelCase_ : list[list]\t\t\t\t\t\t\t\t=\t\t\t[[] for _ in range(lowerCAmelCase__\t\t\t\t\t\t)]\r\n # split list_of_ints between the buckets\r\n for i in list_of_ints:\r\n lowerCAmelCase_ : Union[str, Any]\t\t\t\t\t\t\t\t=\t\t\tint((i / placement) % RADIX\t\t\t\t\t\t)\r\n buckets[tmp].append(lowerCAmelCase__\t\t\t\t\t\t)\r\n # put each buckets' contents into list_of_ints\r\n lowerCAmelCase_ : List[str]\t\t\t\t\t\t\t\t=\t\t\t0\r\n for b in range(lowerCAmelCase__\t\t\t\t\t\t):\r\n for i in buckets[b]:\r\n lowerCAmelCase_ : Optional[int]\t\t\t\t\t\t\t\t=\t\t\ti\r\n a += 1\r\n # move to next\r\n placement *= RADIX\r\n return list_of_ints\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import doctest\r\n\r\n doctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":364,"string":"364"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom diffusers.utils.testing_utils import require_onnxruntime\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n@require_onnxruntime\r\nclass \t\t\t\tUpperCamelCase__ :\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":289,"string":"289"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":784,"cells":{"code":{"kind":"string","value":"\r\n\r\nimport sacrebleu as scb\r\nfrom packaging import version\r\nfrom sacrebleu import TER\r\n\r\nimport datasets\r\n\r\n\r\nlowercase_ \t\t= \"\\\\n@inproceedings{snover-etal-2006-study,\\n title = \\\"A Study of Translation Edit Rate with Targeted Human Annotation\\\",\\n author = \\\"Snover, Matthew and\\n Dorr, Bonnie and\\n Schwartz, Rich and\\n Micciulla, Linnea and\\n Makhoul, John\\\",\\n booktitle = \\\"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\\\",\\n month = aug # \\\" 8-12\\\",\\n year = \\\"2006\\\",\\n address = \\\"Cambridge, Massachusetts, USA\\\",\\n publisher = \\\"Association for Machine Translation in the Americas\\\",\\n url = \\\"https://aclanthology.org/2006.amta-papers.25\\\",\\n pages = \\\"223--231\\\",\\n}\\n@inproceedings{post-2018-call,\\n title = \\\"A Call for Clarity in Reporting {BLEU} Scores\\\",\\n author = \\\"Post, Matt\\\",\\n booktitle = \\\"Proceedings of the Third Conference on Machine Translation: Research Papers\\\",\\n month = oct,\\n year = \\\"2018\\\",\\n address = \\\"Belgium, Brussels\\\",\\n publisher = \\\"Association for Computational Linguistics\\\",\\n url = \\\"https://www.aclweb.org/anthology/W18-6319\\\",\\n pages = \\\"186--191\\\",\\n}\\n\"\r\n\r\nlowercase_ \t\t= \"\\\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\\nhere: https://github.com/jhclark/tercom.\\n\\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\\nsacrebleu's required input format. 
See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\\n\\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\\n\"\r\n\r\nlowercase_ \t\t= \"\\nProduces TER scores alongside the number of edits and reference length.\\n\\nArgs:\\n predictions (list of str): The system stream (a sequence of segments).\\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\\n Only applies if `normalized = True`. Defaults to `False`.\\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\\n\\nReturns:\\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\\n 'num_edits' (int): The cumulative number of edits\\n 'ref_length' (float): The cumulative average reference length\\n\\nExamples:\\n Example 1:\\n >>> predictions = [\\\"does this sentence match??\\\",\\n ... \\\"what about this sentence?\\\",\\n ... \\\"What did the TER metric user say to the developer?\\\"]\\n >>> references = [[\\\"does this sentence match\\\", \\\"does this sentence match!?!\\\"],\\n ... [\\\"wHaT aBoUt ThIs SeNtEnCe?\\\", \\\"wHaT aBoUt ThIs SeNtEnCe?\\\"],\\n ... [\\\"Your jokes are...\\\", \\\"...TERrible\\\"]]\\n >>> ter = datasets.load_metric(\\\"ter\\\")\\n >>> results = ter.compute(predictions=predictions,\\n ... references=references,\\n ... case_sensitive=True)\\n >>> print(results)\\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\\n\\n Example 2:\\n >>> predictions = [\\\"does this sentence match??\\\",\\n ... \\\"what about this sentence?\\\"]\\n >>> references = [[\\\"does this sentence match\\\", \\\"does this sentence match!?!\\\"],\\n ... [\\\"wHaT aBoUt ThIs SeNtEnCe?\\\", \\\"wHaT aBoUt ThIs SeNtEnCe?\\\"]]\\n >>> ter = datasets.load_metric(\\\"ter\\\")\\n >>> results = ter.compute(predictions=predictions,\\n ... references=references,\\n ... case_sensitive=True)\\n >>> print(results)\\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\\n\\n Example 3:\\n >>> predictions = [\\\"does this sentence match??\\\",\\n ... \\\"what about this sentence?\\\"]\\n >>> references = [[\\\"does this sentence match\\\", \\\"does this sentence match!?!\\\"],\\n ... [\\\"wHaT aBoUt ThIs SeNtEnCe?\\\", \\\"wHaT aBoUt ThIs SeNtEnCe?\\\"]]\\n >>> ter = datasets.load_metric(\\\"ter\\\")\\n >>> results = ter.compute(predictions=predictions,\\n ... references=references,\\n ... normalized=True,\\n ... case_sensitive=True)\\n >>> print(results)\\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\\n\\n Example 4:\\n >>> predictions = [\\\"does this sentence match??\\\",\\n ... \\\"what about this sentence?\\\"]\\n >>> references = [[\\\"does this sentence match\\\", \\\"does this sentence match!?!\\\"],\\n ... [\\\"wHaT aBoUt ThIs SeNtEnCe?\\\", \\\"wHaT aBoUt ThIs SeNtEnCe?\\\"]]\\n >>> ter = datasets.load_metric(\\\"ter\\\")\\n >>> results = ter.compute(predictions=predictions,\\n ... references=references,\\n ... ignore_punct=True,\\n ... 
case_sensitive=False)\\n >>> print(results)\\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\\n\\n Example 5:\\n >>> predictions = [\\\"does this sentence match??\\\",\\n ... \\\"what about this sentence?\\\",\\n ... \\\"What did the TER metric user say to the developer?\\\"]\\n >>> references = [[\\\"does this sentence match\\\", \\\"does this sentence match!?!\\\"],\\n ... [\\\"wHaT aBoUt ThIs SeNtEnCe?\\\", \\\"wHaT aBoUt ThIs SeNtEnCe?\\\"],\\n ... [\\\"Your jokes are...\\\", \\\"...TERrible\\\"]]\\n >>> ter = datasets.load_metric(\\\"ter\\\")\\n >>> results = ter.compute(predictions=predictions,\\n ... references=references,\\n ... ignore_punct=True,\\n ... case_sensitive=False)\\n >>> print(results)\\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\\n\"\r\n\r\n\r\n@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,\t\t\t\t\t\t\t_KWARGS_DESCRIPTION )\r\nclass A\t\t\t\t\t\t(\t\t\t\t\t\t\tdatasets.Metric ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\t\tsnake_case__ ( self : Optional[int]\t\t)-> List[str]:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n if version.parse(scb.__version__\t\t) < version.parse('1.4.12'\t\t):\r\n raise ImportWarning(\r\n 'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\\'t match this condition.\\n'\r\n 'You can install it with `pip install \"sacrebleu>=1.4.12\"`.'\t\t)\r\n return datasets.MetricInfo(\r\n description=_DESCRIPTION,citation=_CITATION,homepage='http://www.cs.umd.edu/~snover/tercom/',inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(\r\n {\r\n 'predictions': datasets.Value('string',id='sequence'\t\t),\r\n 'references': datasets.Sequence(datasets.Value('string',id='sequence'\t\t),id='references'\t\t),\r\n }\t\t),codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'],reference_urls=[\r\n 'https://github.com/jhclark/tercom',\r\n ],)\r\n\r\n\r\n def \t\t\t\t\t\tsnake_case__ ( self : int,lowercase_ : Any,lowercase_ : Optional[Any],lowercase_ : bool = False,lowercase_ : bool = False,lowercase_ : bool = False,lowercase_ : bool = False,)-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ = len(references[0]\t\t)\r\n if any(len(lowercase_\t\t) != references_per_prediction for refs in references\t\t):\r\n raise ValueError('Sacrebleu requires the same number of references for each prediction'\t\t)\r\n A__ = [[refs[i] for refs in references] for i in range(lowercase_\t\t)]\r\n\r\n A__ = TER(\r\n normalized=lowercase_,no_punct=lowercase_,asian_support=lowercase_,case_sensitive=lowercase_,)\r\n A__ = sb_ter.corpus_score(lowercase_,lowercase_\t\t)\r\n\r\n return {\"score\": output.score, \"num_edits\": output.num_edits, \"ref_length\": output.ref_length}\r\n\r\n"},"code_codestyle":{"kind":"number","value":7,"string":"7"},"style_context":{"kind":"string","value":"\r\n\r\nimport argparse\r\nimport json\r\n\r\nimport requests\r\nimport torch\r\nfrom huggingface_hub import hf_hub_download\r\nfrom PIL import Image\r\n\r\nfrom transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation\r\ndef \t\t\t\t\t\t_snake_case( SCREAMING_SNAKE_CASE__\t\t\t:\tAny )\t\t\t\t\t\t\t-> int:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ = 384\r\n A__ = 7\r\n if \"tiny\" in model_name:\r\n A__ = 96\r\n A__ = (2, 2, 6, 2)\r\n A__ = (3, 6, 12, 24)\r\n elif \"small\" in 
model_name:\r\n A__ = 96\r\n A__ = (2, 2, 18, 2)\r\n A__ = (3, 6, 12, 24)\r\n elif \"base\" in model_name:\r\n A__ = 128\r\n A__ = (2, 2, 18, 2)\r\n A__ = (4, 8, 16, 32)\r\n A__ = 12\r\n A__ = 512\r\n elif \"large\" in model_name:\r\n A__ = 192\r\n A__ = (2, 2, 18, 2)\r\n A__ = (6, 12, 24, 48)\r\n A__ = 12\r\n A__ = 768\r\n\r\n # set label information\r\n A__ = 150\r\n A__ = 'huggingface/label-files'\r\n A__ = 'ade20k-id2label.json'\r\n A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ ,\t\t\trepo_type='dataset' ) ,\t\t\t'r' ) )\r\n A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}\r\n A__ = {v: k for k, v in idalabel.items()}\r\n\r\n A__ = SwinConfig(\r\n embed_dim=SCREAMING_SNAKE_CASE__ ,\t\t\tdepths=SCREAMING_SNAKE_CASE__ ,\t\t\tnum_heads=SCREAMING_SNAKE_CASE__ ,\t\t\twindow_size=SCREAMING_SNAKE_CASE__ ,\t\t\tout_features=['stage1', 'stage2', 'stage3', 'stage4'] ,\t\t\t)\r\n A__ = UperNetConfig(\r\n backbone_config=SCREAMING_SNAKE_CASE__ ,\t\t\tauxiliary_in_channels=SCREAMING_SNAKE_CASE__ ,\t\t\tnum_labels=SCREAMING_SNAKE_CASE__ ,\t\t\tidalabel=SCREAMING_SNAKE_CASE__ ,\t\t\tlabelaid=SCREAMING_SNAKE_CASE__ ,\t\t\t)\r\n\r\n return config\r\ndef \t\t\t\t\t\t_snake_case( SCREAMING_SNAKE_CASE__\t\t\t:\tUnion[str, Any] )\t\t\t\t\t\t\t-> Dict:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ = []\r\n\r\n # fmt: off\r\n # stem\r\n rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )\r\n rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )\r\n rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )\r\n rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )\r\n # stages\r\n for i in range(len(config.backbone_config.depths ) ):\r\n for j in range(config.backbone_config.depths[i] ):\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )\r\n 
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )\r\n rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )\r\n\r\n if i < 3:\r\n rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )\r\n rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )\r\n rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )\r\n rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )\r\n rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )\r\n\r\n # decode head\r\n rename_keys.extend(\r\n [\r\n ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),\r\n ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),\r\n ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),\r\n ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),\r\n ] )\r\n # fmt: on\r\n\r\n return rename_keys\r\ndef \t\t\t\t\t\t_snake_case( SCREAMING_SNAKE_CASE__\t\t\t:\tList[str] ,\t\t\tSCREAMING_SNAKE_CASE__\t\t\t:\tAny ,\t\t\tSCREAMING_SNAKE_CASE__\t\t\t:\tList[str] )\t\t\t\t\t\t\t-> Optional[int]:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ = dct.pop(SCREAMING_SNAKE_CASE__ )\r\n A__ = val\r\ndef \t\t\t\t\t\t_snake_case( SCREAMING_SNAKE_CASE__\t\t\t:\tUnion[str, Any] ,\t\t\tSCREAMING_SNAKE_CASE__\t\t\t:\tList[str] )\t\t\t\t\t\t\t-> Any:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]\r\n for i in range(len(backbone_config.depths ) ):\r\n A__ = num_features[i]\r\n for j in range(backbone_config.depths[i] ):\r\n # fmt: off\r\n # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)\r\n A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )\r\n A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )\r\n # next, add query, keys and values (in that order) to the state dict\r\n A__ = in_proj_weight[:dim, :]\r\n A__ = in_proj_bias[: dim]\r\n A__ = in_proj_weight[\r\n dim : dim * 2, :\r\n ]\r\n A__ = in_proj_bias[\r\n dim : dim * 2\r\n ]\r\n A__ = in_proj_weight[\r\n -dim :, :\r\n ]\r\n A__ = in_proj_bias[-dim :]\r\n # fmt: on\r\ndef \t\t\t\t\t\t_snake_case( SCREAMING_SNAKE_CASE__\t\t\t:\tUnion[str, Any] )\t\t\t\t\t\t\t-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ ,\t\t\t\t\t\tA__ = x.shape\r\n A__ = x.reshape(SCREAMING_SNAKE_CASE__ ,\t\t\t4 ,\t\t\tin_channel // 4 )\r\n A__ = x[:, [0, 2, 1, 3], :].transpose(1 ,\t\t\t2 ).reshape(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ )\r\n return x\r\ndef \t\t\t\t\t\t_snake_case( SCREAMING_SNAKE_CASE__\t\t\t:\tTuple )\t\t\t\t\t\t\t-> List[str]:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ ,\t\t\t\t\t\tA__ = x.shape\r\n A__ = x.reshape(SCREAMING_SNAKE_CASE__ ,\t\t\tin_channel // 4 ,\t\t\t4 )\r\n A__ = x[:, :, [0, 2, 1, 3]].transpose(1 ,\t\t\t2 ).reshape(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ )\r\n\r\n return x\r\ndef \t\t\t\t\t\t_snake_case( 
SCREAMING_SNAKE_CASE__\t\t\t:\tAny )\t\t\t\t\t\t\t-> Optional[int]:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ = x.shape[0]\r\n A__ = x.reshape(4 ,\t\t\tin_channel // 4 )\r\n A__ = x[[0, 2, 1, 3], :].transpose(0 ,\t\t\t1 ).reshape(SCREAMING_SNAKE_CASE__ )\r\n return x\r\ndef \t\t\t\t\t\t_snake_case( SCREAMING_SNAKE_CASE__\t\t\t:\tAny )\t\t\t\t\t\t\t-> List[Any]:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ = x.shape[0]\r\n A__ = x.reshape(in_channel // 4 ,\t\t\t4 )\r\n A__ = x[:, [0, 2, 1, 3]].transpose(0 ,\t\t\t1 ).reshape(SCREAMING_SNAKE_CASE__ )\r\n return x\r\ndef \t\t\t\t\t\t_snake_case( SCREAMING_SNAKE_CASE__\t\t\t:\tList[str] ,\t\t\tSCREAMING_SNAKE_CASE__\t\t\t:\tOptional[Any] ,\t\t\tSCREAMING_SNAKE_CASE__\t\t\t:\tOptional[int] )\t\t\t\t\t\t\t-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n A__ = {\r\n 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',\r\n 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',\r\n 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',\r\n 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',\r\n }\r\n A__ = model_name_to_url[model_name]\r\n A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ ,\t\t\tmap_location='cpu' ,\t\t\tfile_name=SCREAMING_SNAKE_CASE__ )[\r\n 'state_dict'\r\n ]\r\n\r\n for name, param in state_dict.items():\r\n print(SCREAMING_SNAKE_CASE__ ,\t\t\tparam.shape )\r\n\r\n A__ = get_upernet_config(SCREAMING_SNAKE_CASE__ )\r\n A__ = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )\r\n model.eval()\r\n\r\n # replace \"bn\" => \"batch_norm\"\r\n for key in state_dict.copy().keys():\r\n A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ )\r\n if \"bn\" in key:\r\n A__ = key.replace('bn' ,\t\t\t'batch_norm' )\r\n A__ = val\r\n\r\n # rename keys\r\n A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ )\r\n for src, dest in rename_keys:\r\n rename_key(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ )\r\n read_in_q_k_v(SCREAMING_SNAKE_CASE__ ,\t\t\tconfig.backbone_config )\r\n\r\n # fix downsample parameters\r\n for key, value in state_dict.items():\r\n if \"downsample\" in key:\r\n if \"reduction\" in key:\r\n A__ = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE__ )\r\n if \"norm\" in key:\r\n A__ = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE__ )\r\n\r\n model.load_state_dict(SCREAMING_SNAKE_CASE__ )\r\n\r\n # verify on image\r\n A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'\r\n A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ ,\t\t\tstream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' 
)\r\n\r\n A__ = SegformerImageProcessor()\r\n A__ = processor(SCREAMING_SNAKE_CASE__ ,\t\t\treturn_tensors='pt' ).pixel_values\r\n\r\n with torch.no_grad():\r\n A__ = model(SCREAMING_SNAKE_CASE__ )\r\n A__ = outputs.logits\r\n\r\n print(logits.shape )\r\n print('First values of logits:' ,\t\t\tlogits[0, 0, :3, :3] )\r\n # assert values\r\n if model_name == \"upernet-swin-tiny\":\r\n A__ = torch.tensor(\r\n [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )\r\n elif model_name == \"upernet-swin-small\":\r\n A__ = torch.tensor(\r\n [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )\r\n elif model_name == \"upernet-swin-base\":\r\n A__ = torch.tensor(\r\n [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )\r\n elif model_name == \"upernet-swin-large\":\r\n A__ = torch.tensor(\r\n [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )\r\n print('Logits:' ,\t\t\toutputs.logits[0, 0, :3, :3] )\r\n assert torch.allclose(outputs.logits[0, 0, :3, :3] ,\t\t\tSCREAMING_SNAKE_CASE__ ,\t\t\tatol=1E-4 )\r\n print('Looks ok!' )\r\n\r\n if pytorch_dump_folder_path is not None:\r\n print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )\r\n model.save_pretrained(SCREAMING_SNAKE_CASE__ )\r\n print(f'Saving processor to {pytorch_dump_folder_path}' )\r\n processor.save_pretrained(SCREAMING_SNAKE_CASE__ )\r\n\r\n if push_to_hub:\r\n print(f'Pushing model and processor for {model_name} to hub' )\r\n model.push_to_hub(f'openmmlab/{model_name}' )\r\n processor.push_to_hub(f'openmmlab/{model_name}' )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n lowercase_ \t\t= argparse.ArgumentParser()\r\n # Required parameters\r\n parser.add_argument(\r\n \"--model_name\",\r\n default=\"upernet-swin-tiny\",\r\n type=str,\r\n choices=[f\"\"\"upernet-swin-{size}\"\"\" for size in [\"tiny\", \"small\", \"base\", \"large\"]],\r\n help=\"Name of the Swin + UperNet model you'd like to convert.\",\r\n )\r\n parser.add_argument(\r\n \"--pytorch_dump_folder_path\", default=None, type=str, help=\"Path to the output PyTorch model directory.\"\r\n )\r\n parser.add_argument(\r\n \"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the converted model to the 🤗 hub.\"\r\n )\r\n\r\n lowercase_ \t\t= parser.parse_args()\r\n convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":7,"string":"7"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":785,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t_SCREAMING_SNAKE_CASE\t\t( _lowercase\t\t\t\t\t\t: str )\t\t\t->list:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tif n_term == \"\":\r\n\t\treturn []\r\n\ta\t\t\t\t:\t\tlist = []\r\n\tfor temp in range(int(_lowercase ) ):\r\n\t\tseries.append(F\"\"\"1/{temp + 1}\"\"\" if series else \"1\" )\r\n\treturn series\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\ta\t\t\t\t\t\t:\t\t\tTuple\t\t\t\t\t = input('''Enter the last number (nth term) of the Harmonic Series''')\r\n\t\tprint('''Formula of Harmonic Series => 1+1/2+1/3 ..... 
1/n''')\r\n\t\tprint(harmonic_series(nth_term))\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":79,"string":"79"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\nimport sacrebleu as scb\r\nfrom packaging import version\r\nfrom sacrebleu import TER\r\n\r\nimport datasets\r\n\r\n\r\na\t\t\t\t\t\t:\t\t\tTuple\t\t\t\t\t = '''\\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n'''\r\n\r\na\t\t\t\t\t\t:\t\t\tList[str]\t\t\t\t\t = '''\\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'''\r\n\r\na\t\t\t\t\t\t:\t\t\tList[Any]\t\t\t\t\t = '''\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. 
Defaults to `False`.\n\nReturns:\n \\'score\\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \\'num_edits\\' (int): The cumulative number of edits\n \\'ref_length\\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\\'score\\': 150.0, \\'num_edits\\': 15, \\'ref_length\\': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\\'score\\': 62.5, \\'num_edits\\': 5, \\'ref_length\\': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\\'score\\': 57.14285714285714, \\'num_edits\\': 6, \\'ref_length\\': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\\'score\\': 0.0, \\'num_edits\\': 0, \\'ref_length\\': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {\\'score\\': 100.0, \\'num_edits\\': 10, \\'ref_length\\': 10.0}\n'''\r\n\r\n\r\n@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION\t\t,\t_KWARGS_DESCRIPTION\t\t\t\t\t\t)\r\nclass \t__UpperCamelCase (\t\t\t\t\t\tdatasets.Metric\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\tdef __a ( self\t\t\t\t) ->\t\t\tDict:\r\n\t\t\t\t\t\t\t\tif version.parse(scb.__version__\t\t\t\t) < version.parse(\"1.4.12\"\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\traise ImportWarning(\r\n\t\t\t\t\t\t\t\t\t \"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\\n\"\r\n\t\t\t\t\t\t\t\t\t \"You can install it with `pip install \\\"sacrebleu>=1.4.12\\\"`.\"\t\t\t\t)\r\n\t\t\t\t\t\t\t\treturn datasets.MetricInfo(\r\n\t\t\t\t\t\t\t\t description=_DESCRIPTION\t\t\t\t, citation=_CITATION\t\t\t\t, homepage=\"http://www.cs.umd.edu/~snover/tercom/\"\t\t\t\t, inputs_description=_KWARGS_DESCRIPTION\t\t\t\t, features=datasets.Features(\r\n\t\t\t\t\t\t\t\t {\r\n\t\t\t\t\t\t\t\t \"predictions\": datasets.Value(\"string\"\t\t\t\t, id=\"sequence\"\t\t\t\t),\r\n\t\t\t\t\t\t\t\t \"references\": datasets.Sequence(datasets.Value(\"string\"\t\t\t\t, id=\"sequence\"\t\t\t\t)\t\t\t\t, id=\"references\"\t\t\t\t),\r\n\t\t\t\t\t\t\t\t }\t\t\t\t)\t\t\t\t, codebase_urls=[\"https://github.com/mjpost/sacreBLEU#ter\"]\t\t\t\t, reference_urls=[\r\n\t\t\t\t\t\t\t\t \"https://github.com/jhclark/tercom\",\r\n\t\t\t\t\t\t\t\t ]\t\t\t\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\tdef __a ( self\t\t\t\t, lowerCAmelCase__\t\t\t\t, lowerCAmelCase__\t\t\t\t, lowerCAmelCase__ = False\t\t\t\t, lowerCAmelCase__ = False\t\t\t\t, lowerCAmelCase__ = False\t\t\t\t, lowerCAmelCase__ = False\t\t\t\t, ) ->\t\t\tAny:\r\n\t\t\t\t\t\t\t\ta\t\t\t\t:\t\tOptional[int] = len(references[0]\t\t\t\t)\r\n\t\t\t\t\t\t\t\tif any(len(lowerCAmelCase__\t\t\t\t) != references_per_prediction for refs in references\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"Sacrebleu requires the same number of references for each prediction\"\t\t\t\t)\r\n\t\t\t\t\t\t\t\ta\t\t\t\t:\t\tList[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__\t\t\t\t)]\r\n\r\n\t\t\t\t\t\t\t\ta\t\t\t\t:\t\tUnion[str, Any] = TER(\r\n\t\t\t\t\t\t\t\t normalized=lowerCAmelCase__\t\t\t\t, no_punct=lowerCAmelCase__\t\t\t\t, asian_support=lowerCAmelCase__\t\t\t\t, case_sensitive=lowerCAmelCase__\t\t\t\t, )\r\n\t\t\t\t\t\t\t\ta\t\t\t\t:\t\tOptional[Any] = sb_ter.corpus_score(lowerCAmelCase__\t\t\t\t, lowerCAmelCase__\t\t\t\t)\r\n\r\n\t\t\t\t\t\t\t\treturn {\"score\": output.score, \"num_edits\": output.num_edits, \"ref_length\": output.ref_length}\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":79,"string":"79"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":786,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\nimport argparse\r\nimport json\r\nfrom pathlib import Path\r\n\r\nimport requests\r\nimport torch\r\nfrom huggingface_hub import hf_hub_download\r\nfrom PIL import Image\r\n\r\nfrom transformers import (\r\n BertTokenizer,\r\n ViltConfig,\r\n ViltForImageAndTextRetrieval,\r\n ViltForImagesAndTextClassification,\r\n ViltForMaskedLM,\r\n ViltForQuestionAnswering,\r\n ViltImageProcessor,\r\n ViltProcessor,\r\n)\r\nfrom transformers.utils import logging\r\n\r\n\r\nlogging.set_verbosity_info()\r\n_lowercase\t\t: int\t\t\t\t\t=logging.get_logger(__name__)\r\n\r\ndef \tlowerCAmelCase_ ( _lowercase\t\t\t\t: List[Any]\t\t\t\t\t\t, 
_lowercase\t\t\t\t: Optional[int]=False\t\t\t\t\t\t, _lowercase\t\t\t\t: List[str]=False\t\t\t\t\t\t, _lowercase\t\t\t\t: Any=False)\t\t\t\t\t\t\t-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\"\"\"simple docstring\"\"\"\r\n\t\t\ta__ :\t\t\t\t\t\tTuple\t\t\t\t\t\t = []\r\n\t\t\tfor i in range(config.num_hidden_layers):\r\n\t\t\t\t\t\t# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms\r\n\t\t\t\t\t\trename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight'''))\r\n\t\t\t\t\t\trename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias'''))\r\n\t\t\t\t\t\trename_keys.append(\r\n\t\t\t\t\t\t (F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight'''))\r\n\t\t\t\t\t\trename_keys.append(\r\n\t\t\t\t\t\t (F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias'''))\r\n\t\t\t\t\t\trename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight'''))\r\n\t\t\t\t\t\trename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias'''))\r\n\t\t\t\t\t\trename_keys.append(\r\n\t\t\t\t\t\t (F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight'''))\r\n\t\t\t\t\t\trename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias'''))\r\n\t\t\t\t\t\trename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight'''))\r\n\t\t\t\t\t\trename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias'''))\r\n\r\n\t\t\t# embeddings\r\n\t\t\trename_keys.extend(\r\n\t\t\t [\r\n\t\t\t # text embeddings\r\n\t\t\t (\"\"\"text_embeddings.word_embeddings.weight\"\"\", \"\"\"vilt.embeddings.text_embeddings.word_embeddings.weight\"\"\"),\r\n\t\t\t (\r\n\t\t\t \"\"\"text_embeddings.position_embeddings.weight\"\"\",\r\n\t\t\t \"\"\"vilt.embeddings.text_embeddings.position_embeddings.weight\"\"\",\r\n\t\t\t ),\r\n\t\t\t (\"\"\"text_embeddings.position_ids\"\"\", \"\"\"vilt.embeddings.text_embeddings.position_ids\"\"\"),\r\n\t\t\t (\r\n\t\t\t \"\"\"text_embeddings.token_type_embeddings.weight\"\"\",\r\n\t\t\t \"\"\"vilt.embeddings.text_embeddings.token_type_embeddings.weight\"\"\",\r\n\t\t\t ),\r\n\t\t\t (\"\"\"text_embeddings.LayerNorm.weight\"\"\", \"\"\"vilt.embeddings.text_embeddings.LayerNorm.weight\"\"\"),\r\n\t\t\t (\"\"\"text_embeddings.LayerNorm.bias\"\"\", \"\"\"vilt.embeddings.text_embeddings.LayerNorm.bias\"\"\"),\r\n\t\t\t # patch embeddings\r\n\t\t\t (\"\"\"transformer.cls_token\"\"\", \"\"\"vilt.embeddings.cls_token\"\"\"),\r\n\t\t\t (\"\"\"transformer.patch_embed.proj.weight\"\"\", \"\"\"vilt.embeddings.patch_embeddings.projection.weight\"\"\"),\r\n\t\t\t (\"\"\"transformer.patch_embed.proj.bias\"\"\", \"\"\"vilt.embeddings.patch_embeddings.projection.bias\"\"\"),\r\n\t\t\t (\"\"\"transformer.pos_embed\"\"\", \"\"\"vilt.embeddings.position_embeddings\"\"\"),\r\n\t\t\t # token type embeddings\r\n\t\t\t (\"\"\"token_type_embeddings.weight\"\"\", \"\"\"vilt.embeddings.token_type_embeddings.weight\"\"\"),\r\n\t\t\t ])\r\n\r\n\t\t\t# final layernorm + pooler\r\n\t\t\trename_keys.extend(\r\n\t\t\t [\r\n\t\t\t (\"\"\"transformer.norm.weight\"\"\", \"\"\"vilt.layernorm.weight\"\"\"),\r\n\t\t\t 
(\"\"\"transformer.norm.bias\"\"\", \"\"\"vilt.layernorm.bias\"\"\"),\r\n\t\t\t (\"\"\"pooler.dense.weight\"\"\", \"\"\"vilt.pooler.dense.weight\"\"\"),\r\n\t\t\t (\"\"\"pooler.dense.bias\"\"\", \"\"\"vilt.pooler.dense.bias\"\"\"),\r\n\t\t\t ])\r\n\r\n\t\t\t# classifier head(s)\r\n\t\t\tif vqa_model:\r\n\t\t\t\t\t\t# classification head\r\n\t\t\t\t\t\trename_keys.extend(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t (\"\"\"vqa_classifier.0.weight\"\"\", \"\"\"classifier.0.weight\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"vqa_classifier.0.bias\"\"\", \"\"\"classifier.0.bias\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"vqa_classifier.1.weight\"\"\", \"\"\"classifier.1.weight\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"vqa_classifier.1.bias\"\"\", \"\"\"classifier.1.bias\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"vqa_classifier.3.weight\"\"\", \"\"\"classifier.3.weight\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"vqa_classifier.3.bias\"\"\", \"\"\"classifier.3.bias\"\"\"),\r\n\t\t\t\t\t\t ])\r\n\t\t\telif nlvr_model:\r\n\t\t\t\t\t\t# classification head\r\n\t\t\t\t\t\trename_keys.extend(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t (\"\"\"nlvr2_classifier.0.weight\"\"\", \"\"\"classifier.0.weight\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"nlvr2_classifier.0.bias\"\"\", \"\"\"classifier.0.bias\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"nlvr2_classifier.1.weight\"\"\", \"\"\"classifier.1.weight\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"nlvr2_classifier.1.bias\"\"\", \"\"\"classifier.1.bias\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"nlvr2_classifier.3.weight\"\"\", \"\"\"classifier.3.weight\"\"\"),\r\n\t\t\t\t\t\t (\"\"\"nlvr2_classifier.3.bias\"\"\", \"\"\"classifier.3.bias\"\"\"),\r\n\t\t\t\t\t\t ])\r\n\t\t\telse:\r\n\t\t\t\t\t\tpass\r\n\r\n\t\t\treturn rename_keys\r\n\r\ndef \tlowerCAmelCase_ ( _lowercase\t\t\t\t: Union[str, Any]\t\t\t\t\t\t, _lowercase\t\t\t\t: Optional[int])\t\t\t\t\t\t\t-> List[str]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\"\"\"simple docstring\"\"\"\r\n\t\t\tfor i in range(config.num_hidden_layers):\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t = 'vilt.'\r\n\t\t\t\t\t\t# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tDict\t\t\t\t\t\t = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''')\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tTuple\t\t\t\t\t\t = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''')\r\n\t\t\t\t\t\t# next, add query, keys and values (in that order) to the state dict\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t = in_proj_weight[\r\n\t\t\t\t\t\t : config.hidden_size, :\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t = in_proj_bias[: config.hidden_size]\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tint\t\t\t\t\t\t = in_proj_weight[\r\n\t\t\t\t\t\t config.hidden_size : config.hidden_size * 2, :\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t = in_proj_bias[\r\n\t\t\t\t\t\t config.hidden_size : config.hidden_size * 2\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tTuple\t\t\t\t\t\t = in_proj_weight[\r\n\t\t\t\t\t\t -config.hidden_size :, :\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t = in_proj_bias[-config.hidden_size :]\r\n\r\ndef \tlowerCAmelCase_ ( _lowercase\t\t\t\t: str)\t\t\t\t\t\t\t-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\"\"\"simple docstring\"\"\"\r\n\t\t\ta__ :\t\t\t\t\t\tTuple\t\t\t\t\t\t = ['head.weight', 'head.bias']\r\n\t\t\tfor k in ignore_keys:\r\n\t\t\t\t\t\tstate_dict.pop(__a\t\t\t\t\t\t, __a)\r\n\r\ndef \tlowerCAmelCase_ ( _lowercase\t\t\t\t: int\t\t\t\t\t\t, _lowercase\t\t\t\t: 
Dict\t\t\t\t\t\t, _lowercase\t\t\t\t: Optional[int])\t\t\t\t\t\t\t-> int:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\"\"\"simple docstring\"\"\"\r\n\t\t\ta__ :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t = dct.pop(__a)\r\n\t\t\ta__ :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t = val\r\n\r\n@torch.no_grad()\r\ndef \tlowerCAmelCase_ ( _lowercase\t\t\t\t: int\t\t\t\t\t\t, _lowercase\t\t\t\t: str)\t\t\t\t\t\t\t-> str:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\"\"\"simple docstring\"\"\"\r\n\t\t\ta__ :\t\t\t\t\t\tDict\t\t\t\t\t\t = ViltConfig(image_size=384\t\t\t\t\t\t, patch_size=32\t\t\t\t\t\t, tie_word_embeddings=__a)\r\n\t\t\ta__ :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t = False\r\n\t\t\ta__ :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t = False\r\n\t\t\ta__ :\t\t\t\t\t\tList[str]\t\t\t\t\t\t = False\r\n\t\t\ta__ :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t = False\r\n\t\t\tif \"vqa\" in checkpoint_url:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t = True\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[str]\t\t\t\t\t\t = 3129\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tAny\t\t\t\t\t\t = 'huggingface/label-files'\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t = 'vqa2-id2label.json'\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t = json.load(open(hf_hub_download(__a\t\t\t\t\t\t, __a\t\t\t\t\t\t, repo_type=\"\"\"dataset\"\"\")\t\t\t\t\t\t, \"\"\"r\"\"\"))\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[str]\t\t\t\t\t\t = {int(__a): v for k, v in idalabel.items()}\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t = idalabel\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t = {v: k for k, v in idalabel.items()}\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t = ViltForQuestionAnswering(__a)\r\n\t\t\telif \"nlvr\" in checkpoint_url:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tstr\t\t\t\t\t\t = True\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t = 2\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[str]\t\t\t\t\t\t = {0: 'False', 1: 'True'}\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t = {v: k for k, v in config.idalabel.items()}\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tTuple\t\t\t\t\t\t = 3\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tstr\t\t\t\t\t\t = ViltForImagesAndTextClassification(__a)\r\n\t\t\telif \"irtr\" in checkpoint_url:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t = True\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t = ViltForImageAndTextRetrieval(__a)\r\n\t\t\telif \"mlm_itm\" in checkpoint_url:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tstr\t\t\t\t\t\t = True\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tint\t\t\t\t\t\t = ViltForMaskedLM(__a)\r\n\t\t\telse:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Unknown model type\"\"\")\r\n\r\n\t\t\t# load state_dict of original model, remove and rename some keys\r\n\t\t\ta__ :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t = torch.hub.load_state_dict_from_url(__a\t\t\t\t\t\t, map_location=\"\"\"cpu\"\"\")['state_dict']\r\n\t\t\ta__ :\t\t\t\t\t\tList[str]\t\t\t\t\t\t = create_rename_keys(__a\t\t\t\t\t\t, __a\t\t\t\t\t\t, __a\t\t\t\t\t\t, __a)\r\n\t\t\tfor src, dest in rename_keys:\r\n\t\t\t\t\t\trename_key(__a\t\t\t\t\t\t, __a\t\t\t\t\t\t, __a)\r\n\t\t\tread_in_q_k_v(__a\t\t\t\t\t\t, __a)\r\n\t\t\tif mlm_model or irtr_model:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[str]\t\t\t\t\t\t = ['itm_score.fc.weight', 'itm_score.fc.bias']\r\n\t\t\t\t\t\tfor k in ignore_keys:\r\n\t\t\t\t\t\t\t\t\tstate_dict.pop(__a\t\t\t\t\t\t, __a)\r\n\r\n # load state dict into HuggingFace model\r\n\t\t\tmodel.eval()\r\n\t\t\tif mlm_model:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t = model.load_state_dict(__a\t\t\t\t\t\t, 
strict=__a)\r\n\t\t\t\t\t\tassert missing_keys == [\"mlm_score.decoder.bias\"]\r\n\t\t\telse:\r\n\t\t\t\t\t\tmodel.load_state_dict(__a)\r\n\r\n\t\t\t# Define processor\r\n\t\t\ta__ :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t = ViltImageProcessor(size=384)\r\n\t\t\ta__ :\t\t\t\t\t\tAny\t\t\t\t\t\t = BertTokenizer.from_pretrained(\"\"\"bert-base-uncased\"\"\")\r\n\t\t\ta__ :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t = ViltProcessor(__a\t\t\t\t\t\t, __a)\r\n\r\n\t\t\t# Forward pass on example inputs (image + text)\r\n\t\t\tif nlvr_model:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t = Image.open(requests.get(\"\"\"https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg\"\"\"\t\t\t\t\t\t, stream=__a).raw)\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t = Image.open(requests.get(\"\"\"https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg\"\"\"\t\t\t\t\t\t, stream=__a).raw)\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tAny\t\t\t\t\t\t = (\r\n\t\t\t\t\t\t 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'\r\n\t\t\t\t\t\t ' standing.'\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tDict\t\t\t\t\t\t = processor(__a\t\t\t\t\t\t, __a\t\t\t\t\t\t, return_tensors=\"\"\"pt\"\"\")\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[str]\t\t\t\t\t\t = processor(__a\t\t\t\t\t\t, __a\t\t\t\t\t\t, return_tensors=\"\"\"pt\"\"\")\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tDict\t\t\t\t\t\t = model(\r\n\t\t\t\t\t\t input_ids=encoding_a.input_ids\t\t\t\t\t\t, pixel_values=encoding_a.pixel_values\t\t\t\t\t\t, pixel_values_a=encoding_a.pixel_values\t\t\t\t\t\t, )\r\n\t\t\telse:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tDict\t\t\t\t\t\t = Image.open(requests.get(\"\"\"http://images.cocodataset.org/val2017/000000039769.jpg\"\"\"\t\t\t\t\t\t, stream=__a).raw)\r\n\t\t\t\t\t\tif mlm_model:\r\n\t\t\t\t\t\t\t\t\ta__ :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t = 'a bunch of [MASK] laying on a [MASK].'\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t = 'How many cats are there?'\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[str]\t\t\t\t\t\t = processor(__a\t\t\t\t\t\t, __a\t\t\t\t\t\t, return_tensors=\"\"\"pt\"\"\")\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tint\t\t\t\t\t\t = model(**__a)\r\n\r\n\t\t\t# Verify outputs\r\n\t\t\tif mlm_model:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tList[str]\t\t\t\t\t\t = torch.Size([1, 11, 3_0522])\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tAny\t\t\t\t\t\t = torch.tensor([-12.5061, -12.5123, -12.5174])\r\n\t\t\t\t\t\tassert outputs.logits.shape == expected_shape\r\n\t\t\t\t\t\tassert torch.allclose(outputs.logits[0, 0, :3]\t\t\t\t\t\t, __a\t\t\t\t\t\t, atol=1e-4)\r\n\r\n\t\t\t\t\t\t# verify masked token prediction equals \"cats\"\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tstr\t\t\t\t\t\t = outputs.logits[0, 4, :].argmax(-1).item()\r\n\t\t\t\t\t\tassert tokenizer.decode([predicted_id]) == \"cats\"\r\n\t\t\telif vqa_model:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tTuple\t\t\t\t\t\t = torch.Size([1, 3129])\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t = torch.tensor([-15.9495, -18.1472, -10.3041])\r\n\t\t\t\t\t\tassert torch.allclose(outputs.logits[0, :3]\t\t\t\t\t\t, __a\t\t\t\t\t\t, atol=1e-4)\r\n\t\t\t\t\t\tassert outputs.logits.shape == expected_shape\r\n\t\t\t\t\t\tassert torch.allclose(outputs.logits[0, 0, :3]\t\t\t\t\t\t, __a\t\t\t\t\t\t, atol=1e-4)\r\n\r\n\t\t\t\t\t\t# verify vqa prediction equals \"2\"\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t = outputs.logits.argmax(-1).item()\r\n\t\t\t\t\t\tassert model.config.idalabel[predicted_idx] == \"2\"\r\n\t\t\telif 
nlvr_model:\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tstr\t\t\t\t\t\t = torch.Size([1, 2])\r\n\t\t\t\t\t\ta__ :\t\t\t\t\t\tDict\t\t\t\t\t\t = torch.tensor([-2.8721, 2.1291])\r\n\t\t\t\t\t\tassert torch.allclose(outputs.logits[0, :3]\t\t\t\t\t\t, __a\t\t\t\t\t\t, atol=1e-4)\r\n\t\t\t\t\t\tassert outputs.logits.shape == expected_shape\r\n\r\n\t\t\tPath(__a).mkdir(exist_ok=__a)\r\n\t\t\tprint(F'''Saving model and processor to {pytorch_dump_folder_path}''')\r\n\t\t\tmodel.save_pretrained(__a)\r\n\t\t\tprocessor.save_pretrained(__a)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t_lowercase\t\t: int\t\t\t\t\t=argparse.ArgumentParser()\r\n\t\t# Required parameters\r\n\t\tparser.add_argument(\r\n\t\t \"--checkpoint_url\",\r\n\t\t default=\"https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt\",\r\n\t\t type=str,\r\n\t\t help=\"URL of the checkpoint you'd like to convert.\",\r\n\t\t)\r\n\t\tparser.add_argument(\r\n\t\t \"--pytorch_dump_folder_path\", default=None, type=str, help=\"Path to the output PyTorch model directory.\"\r\n\t\t)\r\n\r\n\t\t_lowercase\t\t: Tuple\t\t\t\t\t=parser.parse_args()\r\n\t\tconvert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":170,"string":"170"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom collections import namedtuple\r\n\r\n\r\ndef \t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ (\t\t\t__a , __a , __a ):\r\n\tsnake_case_\t\t\t\t\t\t:\tAny =\t\tnamedtuple('result' , 'name value' )\r\n\tif (voltage, current, power).count(0 ) != 1:\r\n\t\traise ValueError('Only one argument must be 0' )\r\n\telif power < 0:\r\n\t\traise ValueError(\r\n\t\t 'Power cannot be negative in any electrical/electronics system' )\r\n\telif voltage == 0:\r\n\t\treturn result('voltage' , power / current )\r\n\telif current == 0:\r\n\t\treturn result('current' , power / voltage )\r\n\telif power == 0:\r\n\t\treturn result('power' , float(round(abs(voltage * current ) , 2 ) ) )\r\n\telse:\r\n\t\traise ValueError('Exactly one argument must be 0' )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t\t\timport doctest\r\n\r\n\t\t\t\t\t\tdoctest.testmod()\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":327,"string":"327"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":787,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\nimport os\r\nfrom typing import Optional\r\n\r\nimport fsspec\r\nfrom fsspec.archive import AbstractArchiveFileSystem\r\nfrom fsspec.utils import DEFAULT_BLOCK_SIZE\r\n\r\nclass \t\t\tSCREAMING_SNAKE_CASE\t\t\t( _a\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n _SCREAMING_SNAKE_CASE = \"\"\"\"\"\"\r\n _SCREAMING_SNAKE_CASE = (\r\n None # protocol passed in prefix to the url. ex: \"gzip\", for gzip://file.txt::http://foo.bar/file.txt.gz\r\n )\r\n _SCREAMING_SNAKE_CASE = None # compression type in fsspec. ex: \"gzip\"\r\n _SCREAMING_SNAKE_CASE = None # extension of the filename to strip. 
ex: \"\".gz\" to get file.txt from file.txt.gz\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __init__( self\t\t\t\t: int ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: str = \"\" ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Optional[str] = None ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Optional[dict] = None ,\t\t\t\t\t\t**UpperCamelCase__\t\t\t\t: List[str]\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n super().__init__(self ,\t\t\t\t\t\t**UpperCamelCase__\t\t\t)\r\n # always open as \"rb\" since fsspec can then use the TextIOWrapper to make it work for \"r\" mode\r\n UpperCamelCase = fsspec.open(\r\n UpperCamelCase__ ,\t\t\t\t\t\tmode='rb' ,\t\t\t\t\t\tprotocol=UpperCamelCase__ ,\t\t\t\t\t\tcompression=self.compression ,\t\t\t\t\t\tclient_kwargs={\r\n 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459\r\n 'trust_env': True, # Enable reading proxy env variables.\r\n **(target_options or {}).pop('client_kwargs' ,\t\t\t\t\t\t{}\t\t\t), # To avoid issues if it was already passed.\r\n } ,\t\t\t\t\t\t**(target_options or {}) ,\t\t\t\t\t\t)\r\n UpperCamelCase = os.path.basename(self.file.path.split('::'\t\t\t)[0]\t\t\t)\r\n UpperCamelCase = (\r\n self.compressed_name[: self.compressed_name.rindex('.'\t\t\t)]\r\n if '.' in self.compressed_name\r\n else self.compressed_name\r\n )\r\n UpperCamelCase = None\r\n\r\n\r\n\r\n\r\n\r\n\r\n @classmethod\r\n def \t\t\tA\t( cls\t\t\t\t: int ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: List[str]\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n return super()._strip_protocol(UpperCamelCase__\t\t\t).lstrip('/'\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\tA\t( self\t\t\t\t: Optional[int]\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n if self.dir_cache is None:\r\n UpperCamelCase = {**self.file.fs.info(self.file.path\t\t\t), 'name': self.uncompressed_name}\r\n UpperCamelCase = {f['name']: f}\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\tA\t( self\t\t\t\t: Any ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: str\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n return self.file.open().read()\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\tA\t( self\t\t\t\t: List[str] ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: str ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: str = \"rb\" ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Union[str, Any]=None ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Any=True ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: List[Any]=None ,\t\t\t\t\t\t**UpperCamelCase__\t\t\t\t: Dict ,\t\t\t\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase = self._strip_protocol(UpperCamelCase__\t\t\t)\r\n if mode != \"rb\":\r\n raise ValueError(f\"\"\"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'\"\"\"\t\t\t)\r\n return self.file.open()\r\n\r\nclass \t\t\tSCREAMING_SNAKE_CASE\t\t\t( _a\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n _SCREAMING_SNAKE_CASE = \"\"\"bz2\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\"bz2\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\".bz2\"\"\"\r\n\r\nclass \t\t\tSCREAMING_SNAKE_CASE\t\t\t( _a\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n _SCREAMING_SNAKE_CASE = \"\"\"gzip\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\"gzip\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\".gz\"\"\"\r\n\r\nclass \t\t\tSCREAMING_SNAKE_CASE\t\t\t( _a\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n _SCREAMING_SNAKE_CASE = 
\"\"\"lz4\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\"lz4\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\".lz4\"\"\"\r\n\r\nclass \t\t\tSCREAMING_SNAKE_CASE\t\t\t( _a\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n _SCREAMING_SNAKE_CASE = \"\"\"xz\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\"xz\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\".xz\"\"\"\r\n\r\nclass \t\t\tSCREAMING_SNAKE_CASE\t\t\t( _a\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n _SCREAMING_SNAKE_CASE = \"\"\"zstd\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\"zstd\"\"\"\r\n _SCREAMING_SNAKE_CASE = \"\"\".zst\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __init__( self\t\t\t\t: List[str] ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: str ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: str = \"rb\" ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Optional[str] = None ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Optional[dict] = None ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: int = DEFAULT_BLOCK_SIZE ,\t\t\t\t\t\t**UpperCamelCase__\t\t\t\t: Dict ,\t\t\t\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n super().__init__(\r\n fo=UpperCamelCase__ ,\t\t\t\t\t\tmode=UpperCamelCase__ ,\t\t\t\t\t\ttarget_protocol=UpperCamelCase__ ,\t\t\t\t\t\ttarget_options=UpperCamelCase__ ,\t\t\t\t\t\tblock_size=UpperCamelCase__ ,\t\t\t\t\t\t**UpperCamelCase__ ,\t\t\t\t\t\t)\r\n # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:\r\n #\r\n # File \"/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py\", line 145, in open\r\n # out.close = close\r\n # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only\r\n #\r\n # see https://github.com/intake/filesystem_spec/issues/725\r\n UpperCamelCase = self.file.__enter__\r\n\r\n class \t\t\tSCREAMING_SNAKE_CASE\t\t\t:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n def __init__( self\t\t\t\t: Optional[int] ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Optional[int]\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase = file_\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __enter__( self\t\t\t\t: Tuple\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n self._file.__enter__()\r\n return self\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __exit__( self\t\t\t\t: int ,\t\t\t\t\t\t*UpperCamelCase__\t\t\t\t: Tuple ,\t\t\t\t\t\t**UpperCamelCase__\t\t\t\t: Tuple\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n self._file.__exit__(*UpperCamelCase__ ,\t\t\t\t\t\t**UpperCamelCase__\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __iter__( self\t\t\t\t: Dict\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n return iter(self._file\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\tA\t( self\t\t\t\t: str\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n return next(self._file\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __getattr__( self\t\t\t\t: Optional[Any] ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Union[str, Any]\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n return getattr(self._file ,\t\t\t\t\t\tUpperCamelCase__\t\t\t)\r\n\r\n def fixed_enter(*UpperCamelCase__\t\t\t\t: List[str] ,\t\t\t\t\t\t**UpperCamelCase__\t\t\t\t: Any\t\t\t):\r\n return WrappedFile(_enter(*UpperCamelCase__ ,\t\t\t\t\t\t**UpperCamelCase__\t\t\t)\t\t\t)\r\n\r\n UpperCamelCase = 
fixed_enter\r\n\r\n"},"code_codestyle":{"kind":"number","value":249,"string":"249"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\nimport unittest\r\n\r\nfrom transformers import (\r\n MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,\r\n TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,\r\n TextaTextGenerationPipeline,\r\n pipeline,\r\n)\r\nfrom transformers.testing_utils import is_pipeline_test, require_tf, require_torch\r\nfrom transformers.utils import is_torch_available\r\n\r\nfrom .test_pipelines_common import ANY\r\n\r\n\r\nif is_torch_available():\r\n import torch\r\n\r\n@is_pipeline_test\r\nclass \t\t\tSCREAMING_SNAKE_CASE\t\t\t( unittest.TestCase\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n _SCREAMING_SNAKE_CASE = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING\r\n _SCREAMING_SNAKE_CASE = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\tA\t( self\t\t\t\t: List[Any] ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Optional[int] ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Union[str, Any] ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: List[str]\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase = TextaTextGenerationPipeline(model=UpperCamelCase__ ,\t\t\t\t\t\ttokenizer=UpperCamelCase__\t\t\t)\r\n return generator, [\"Something to write\", \"Something else\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\tA\t( self\t\t\t\t: str ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: Union[str, Any] ,\t\t\t\t\t\tUpperCamelCase__\t\t\t\t: List[Any]\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase = generator('Something there'\t\t\t)\r\n self.assertEqual(UpperCamelCase__ ,\t\t\t\t\t\t[{'generated_text': ANY(UpperCamelCase__\t\t\t)}]\t\t\t)\r\n # These are encoder decoder, they don't just append to incoming string\r\n self.assertFalse(outputs[0]['generated_text'].startswith('Something there'\t\t\t)\t\t\t)\r\n\r\n UpperCamelCase = generator(['This is great !', 'Something else'] ,\t\t\t\t\t\tnum_return_sequences=2 ,\t\t\t\t\t\tdo_sample=UpperCamelCase__\t\t\t)\r\n self.assertEqual(\r\n UpperCamelCase__ ,\t\t\t\t\t\t[\r\n [{'generated_text': ANY(UpperCamelCase__\t\t\t)}, {'generated_text': ANY(UpperCamelCase__\t\t\t)}],\r\n [{'generated_text': ANY(UpperCamelCase__\t\t\t)}, {'generated_text': ANY(UpperCamelCase__\t\t\t)}],\r\n ] ,\t\t\t\t\t\t)\r\n\r\n UpperCamelCase = generator(\r\n ['This is great !', 'Something else'] ,\t\t\t\t\t\tnum_return_sequences=2 ,\t\t\t\t\t\tbatch_size=2 ,\t\t\t\t\t\tdo_sample=UpperCamelCase__\t\t\t)\r\n self.assertEqual(\r\n UpperCamelCase__ ,\t\t\t\t\t\t[\r\n [{'generated_text': ANY(UpperCamelCase__\t\t\t)}, {'generated_text': ANY(UpperCamelCase__\t\t\t)}],\r\n [{'generated_text': ANY(UpperCamelCase__\t\t\t)}, {'generated_text': ANY(UpperCamelCase__\t\t\t)}],\r\n ] ,\t\t\t\t\t\t)\r\n\r\n with self.assertRaises(UpperCamelCase__\t\t\t):\r\n generator(4\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n @require_torch\r\n def \t\t\tA\t( self\t\t\t\t: Dict\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase = pipeline('text2text-generation' ,\t\t\t\t\t\tmodel='patrickvonplaten/t5-tiny-random' ,\t\t\t\t\t\tframework='pt'\t\t\t)\r\n # do_sample=False necessary for reproducibility\r\n UpperCamelCase = generator('Something there' ,\t\t\t\t\t\tdo_sample=UpperCamelCase__\t\t\t)\r\n self.assertEqual(UpperCamelCase__ ,\t\t\t\t\t\t[{'generated_text': ''}]\t\t\t)\r\n\r\n UpperCamelCase = 3\r\n UpperCamelCase = 
generator(\r\n 'Something there' ,\t\t\t\t\t\tnum_return_sequences=UpperCamelCase__ ,\t\t\t\t\t\tnum_beams=UpperCamelCase__ ,\t\t\t\t\t\t)\r\n UpperCamelCase = [\r\n {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},\r\n {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},\r\n {'generated_text': ''},\r\n ]\r\n self.assertEqual(UpperCamelCase__ ,\t\t\t\t\t\tUpperCamelCase__\t\t\t)\r\n\r\n UpperCamelCase = generator('This is a test' ,\t\t\t\t\t\tdo_sample=UpperCamelCase__ ,\t\t\t\t\t\tnum_return_sequences=2 ,\t\t\t\t\t\treturn_tensors=UpperCamelCase__\t\t\t)\r\n self.assertEqual(\r\n UpperCamelCase__ ,\t\t\t\t\t\t[\r\n {'generated_token_ids': ANY(torch.Tensor\t\t\t)},\r\n {'generated_token_ids': ANY(torch.Tensor\t\t\t)},\r\n ] ,\t\t\t\t\t\t)\r\n UpperCamelCase = generator.model.config.eos_token_id\r\n UpperCamelCase = ''\r\n UpperCamelCase = generator(\r\n ['This is a test', 'This is a second test'] ,\t\t\t\t\t\tdo_sample=UpperCamelCase__ ,\t\t\t\t\t\tnum_return_sequences=2 ,\t\t\t\t\t\tbatch_size=2 ,\t\t\t\t\t\treturn_tensors=UpperCamelCase__ ,\t\t\t\t\t\t)\r\n self.assertEqual(\r\n UpperCamelCase__ ,\t\t\t\t\t\t[\r\n [\r\n {'generated_token_ids': ANY(torch.Tensor\t\t\t)},\r\n {'generated_token_ids': ANY(torch.Tensor\t\t\t)},\r\n ],\r\n [\r\n {'generated_token_ids': ANY(torch.Tensor\t\t\t)},\r\n {'generated_token_ids': ANY(torch.Tensor\t\t\t)},\r\n ],\r\n ] ,\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n @require_tf\r\n def \t\t\tA\t( self\t\t\t\t: str\t\t\t):\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase = pipeline('text2text-generation' ,\t\t\t\t\t\tmodel='patrickvonplaten/t5-tiny-random' ,\t\t\t\t\t\tframework='tf'\t\t\t)\r\n # do_sample=False necessary for reproducibility\r\n UpperCamelCase = generator('Something there' ,\t\t\t\t\t\tdo_sample=UpperCamelCase__\t\t\t)\r\n self.assertEqual(UpperCamelCase__ ,\t\t\t\t\t\t[{'generated_text': ''}]\t\t\t)\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":249,"string":"249"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":788,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\nimport warnings\r\n\r\nfrom transformers import AutoTokenizer\r\nfrom transformers.utils import is_torch_available\r\nfrom transformers.utils.generic import ExplicitEnum\r\n\r\nfrom ...processing_utils import ProcessorMixin\r\n\r\n\r\nif is_torch_available():\r\n\t\t\t\timport torch\r\n\r\nclass \t\tUpperCAmelCase__\t\t\t(\t\t\t__UpperCamelCase ):\r\n\r\n\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tUpperCamelCase\t\t\t\t\t\t\t\t\t= \"\"\"char\"\"\"\r\n\t\t\tUpperCamelCase\t\t\t\t\t\t\t\t\t= \"\"\"bpe\"\"\"\r\n\t\t\tUpperCamelCase\t\t\t\t\t\t\t\t\t= \"\"\"wp\"\"\"\r\n__A \t\t\t=(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)\r\n\r\nclass \t\tUpperCAmelCase__\t\t\t(\t\t\t__UpperCamelCase ):\r\n\r\n\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tUpperCamelCase\t\t\t\t\t\t\t\t\t= [\"\"\"image_processor\"\"\", \"\"\"char_tokenizer\"\"\"]\r\n\t\t\tUpperCamelCase\t\t\t\t\t\t\t\t\t= \"\"\"ViTImageProcessor\"\"\"\r\n\t\t\tUpperCamelCase\t\t\t\t\t\t\t\t\t= \"\"\"MgpstrTokenizer\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t\t\t\t\t\t\t: Tuple , a_\t\t\t\t\t\t\t: List[Any]=None , a_\t\t\t\t\t\t\t: List[Any]=None , **a_\t\t\t\t\t\t\t: Optional[int] ):\r\n\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tNone\r\n\t\t\t\t\tif 
\"feature_extractor\" in kwargs:\r\n\t\t\t\t\t\t\twarnings.warn(\r\n\t\t\t\t\t\t\t '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''\r\n\t\t\t\t\t\t\t ''' instead.''' , a_ , )\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tkwargs.pop('''feature_extractor''' )\r\n\r\n\t\t\t\t\t__UpperCAmelCase : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t=\timage_processor if image_processor is not None else feature_extractor\r\n\t\t\t\t\tif image_processor is None:\r\n\t\t\t\t\t\t\traise ValueError('''You need to specify an `image_processor`.''' )\r\n\t\t\t\t\tif tokenizer is None:\r\n\t\t\t\t\t\t\traise ValueError('''You need to specify a `tokenizer`.''' )\r\n\r\n\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\ttokenizer\r\n\t\t\t\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\tAutoTokenizer.from_pretrained('''gpt2''' )\r\n\t\t\t\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\tAutoTokenizer.from_pretrained('''bert-base-uncased''' )\r\n\r\n\t\t\t\t\tsuper().__init__(a_ , a_ )\r\n\r\n\r\n\r\n\r\n\t\t\tdef __call__( self\t\t\t\t\t\t\t: Tuple , a_\t\t\t\t\t\t\t: Any=None , a_\t\t\t\t\t\t\t: Optional[int]=None , a_\t\t\t\t\t\t\t: Union[str, Any]=None , **a_\t\t\t\t\t\t\t: int ):\r\n\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\tif images is None and text is None:\r\n\t\t\t\t\t\t\traise ValueError('''You need to specify either an `images` or `text` input to process.''' )\r\n\r\n\t\t\t\t\tif images is not None:\r\n\t\t\t\t\t\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tself.image_processor(a_ , return_tensors=a_ , **a_ )\r\n\t\t\t\t\tif text is not None:\r\n\t\t\t\t\t\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tself.char_tokenizer(a_ , return_tensors=a_ , **a_ )\r\n\r\n\t\t\t\t\tif text is None:\r\n\t\t\t\t\t\t\treturn inputs\r\n\t\t\t\t\telif images is None:\r\n\t\t\t\t\t\t\treturn encodings\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\tencodings['''input_ids''']\r\n\t\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\t\t\tdef snake_case__ ( self\t\t\t\t\t\t\t: Union[str, Any] , a_\t\t\t\t\t\t\t: int ):\r\n\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t\t\t__UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t=\tsequences\r\n\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\tchar_preds.size(0 )\r\n\r\n\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tself._decode_helper(a_ , '''char''' )\r\n\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tself._decode_helper(a_ , '''bpe''' )\r\n\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tself._decode_helper(a_ , '''wp''' )\r\n\r\n\t\t\t\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[]\r\n\t\t\t\t\t__UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[]\r\n\t\t\t\t\tfor i in range(a_ ):\r\n\t\t\t\t\t\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[char_scores[i], bpe_scores[i], wp_scores[i]]\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[char_strs[i], bpe_strs[i], wp_strs[i]]\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tscores.index(max(a_ ) )\r\n\t\t\t\t\t\t\tfinal_strs.append(strs[max_score_index] 
)\r\n\t\t\t\t\t\t\tfinal_scores.append(scores[max_score_index] )\r\n\r\n\t\t\t\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{}\r\n\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tfinal_strs\r\n\t\t\t\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\tfinal_scores\r\n\t\t\t\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tchar_strs\r\n\t\t\t\t\t__UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t=\tbpe_strs\r\n\t\t\t\t\t__UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t=\twp_strs\r\n\t\t\t\t\treturn out\r\n\r\n\r\n\r\n\r\n\t\t\tdef snake_case__ ( self\t\t\t\t\t\t\t: Dict , a_\t\t\t\t\t\t\t: Tuple , a_\t\t\t\t\t\t\t: Optional[int] ):\r\n\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\tif format == DecodeType.CHARACTER:\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tself.char_decode\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\t1\r\n\t\t\t\t\t\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t'''[s]'''\r\n\t\t\t\t\telif format == DecodeType.BPE:\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tself.bpe_decode\r\n\t\t\t\t\t\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t2\r\n\t\t\t\t\t\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t'''#'''\r\n\t\t\t\t\telif format == DecodeType.WORDPIECE:\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\tself.wp_decode\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\t1_02\r\n\t\t\t\t\t\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\t'''[SEP]'''\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t\traise ValueError(F'Format {format} is not supported.' )\r\n\r\n\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[], []\r\n\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpred_logits.size(0 )\r\n\t\t\t\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpred_logits.size(1 )\r\n\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpred_logits.topk(1 , dim=-1 , largest=a_ , sorted=a_ )\r\n\t\t\t\t\t__UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpreds_index.view(-1 , a_ )[:, 1:]\r\n\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tdecoder(a_ )\r\n\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\ttorch.nn.functional.softmax(a_ , dim=2 ).max(dim=2 )\r\n\t\t\t\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpreds_max_prob[:, 1:]\r\n\r\n\t\t\t\t\tfor index in range(a_ ):\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpreds_str[index].find(a_ )\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpreds_str[index][:pred_eos]\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpreds_index[index].cpu().tolist()\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpred_index.index(a_ ) if eos_token in pred_index else -1\r\n\t\t\t\t\t\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpreds_max_prob[index][: pred_eos_index + 1]\r\n\t\t\t\t\t\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tpred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0\r\n\t\t\t\t\t\t\tdec_strs.append(a_ )\r\n\t\t\t\t\t\t\tconf_scores.append(a_ )\r\n\r\n\t\t\t\t\treturn dec_strs, 
conf_scores\r\n\r\n\r\n\r\n\r\n\t\t\tdef snake_case__ ( self\t\t\t\t\t\t\t: List[str] , a_\t\t\t\t\t\t\t: int ):\r\n\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(a_ )]\r\n\t\t\t\t\treturn decode_strs\r\n\r\n\r\n\r\n\r\n\t\t\tdef snake_case__ ( self\t\t\t\t\t\t\t: str , a_\t\t\t\t\t\t\t: Tuple ):\r\n\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\treturn self.bpe_tokenizer.batch_decode(a_ )\r\n\r\n\r\n\r\n\r\n\t\t\tdef snake_case__ ( self\t\t\t\t\t\t\t: Optional[int] , a_\t\t\t\t\t\t\t: Optional[Any] ):\r\n\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(a_ )]\r\n\t\t\t\t\treturn decode_strs\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":226,"string":"226"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\nimport argparse\r\nimport json\r\nimport math\r\nimport os\r\nimport time\r\nimport traceback\r\nimport zipfile\r\nfrom collections import Counter\r\n\r\nimport requests\r\n\r\ndef a\t\t\t\t\t\t( _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tAny , _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tAny=None\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tNone\r\n\t\tif token is not None:\r\n\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}\r\n\r\n\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tf'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'\r\n\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\trequests.get(_UpperCAmelCase , headers=_UpperCAmelCase\t\t\t\t\t\t).json()\r\n\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{}\r\n\r\n\t\ttry:\r\n\t\t\t\tjob_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']}\t\t\t\t\t\t)\r\n\t\t\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tmath.ceil((result['''total_count'''] - 1_00) / 1_00\t\t\t\t\t\t)\r\n\r\n\t\t\t\tfor i in range(_UpperCAmelCase\t\t\t\t\t\t):\r\n\t\t\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\trequests.get(url + f'&page={i + 2}' , headers=_UpperCAmelCase\t\t\t\t\t\t).json()\r\n\t\t\t\t\t\tjob_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']}\t\t\t\t\t\t)\r\n\r\n\t\t\t\treturn job_links\r\n\t\texcept Exception:\r\n\t\t\t\tprint(f'Unknown error, could not fetch links:\\n{traceback.format_exc()}'\t\t\t\t\t\t)\r\n\r\n\t\treturn {}\r\n\r\ndef a\t\t\t\t\t\t( _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tList[str] , _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tList[str]=None\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tNone\r\n\t\tif token is not None:\r\n\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}\r\n\r\n\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\tf'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'\r\n\t\t__UpperCAmelCase : 
str\t\t\t\t\t\t\t\t\t\t\t\t\t=\trequests.get(_UpperCAmelCase , headers=_UpperCAmelCase\t\t\t\t\t\t).json()\r\n\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{}\r\n\r\n\t\ttry:\r\n\t\t\t\tartifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']}\t\t\t\t\t\t)\r\n\t\t\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\tmath.ceil((result['''total_count'''] - 1_00) / 1_00\t\t\t\t\t\t)\r\n\r\n\t\t\t\tfor i in range(_UpperCAmelCase\t\t\t\t\t\t):\r\n\t\t\t\t\t\t__UpperCAmelCase : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t=\trequests.get(url + f'&page={i + 2}' , headers=_UpperCAmelCase\t\t\t\t\t\t).json()\r\n\t\t\t\t\t\tartifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']}\t\t\t\t\t\t)\r\n\r\n\t\t\t\treturn artifacts\r\n\t\texcept Exception:\r\n\t\t\t\tprint(f'Unknown error, could not fetch links:\\n{traceback.format_exc()}'\t\t\t\t\t\t)\r\n\r\n\t\treturn {}\r\n\r\ndef a\t\t\t\t\t\t( _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tOptional[Any] , _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tOptional[int] , _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tList[str] , _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tint\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\tNone\r\n\t\tif token is not None:\r\n\t\t\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}\r\n\r\n\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\trequests.get(_UpperCAmelCase , headers=_UpperCAmelCase , allow_redirects=_UpperCAmelCase\t\t\t\t\t\t)\r\n\t\t__UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tresult.headers['''Location''']\r\n\t\t__UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t=\trequests.get(_UpperCAmelCase , allow_redirects=_UpperCAmelCase\t\t\t\t\t\t)\r\n\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\tos.path.join(_UpperCAmelCase , f'{artifact_name}.zip'\t\t\t\t\t\t)\r\n\t\twith open(_UpperCAmelCase , '''wb'''\t\t\t\t\t\t) as fp:\r\n\t\t\t\tfp.write(response.content\t\t\t\t\t\t)\r\n\r\ndef a\t\t\t\t\t\t( _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tDict , _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tList[str]=None\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[]\r\n\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[]\r\n\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tNone\r\n\r\n\t\twith zipfile.ZipFile(_UpperCAmelCase\t\t\t\t\t\t) as z:\r\n\t\t\t\tfor filename in z.namelist():\r\n\t\t\t\t\t\tif not os.path.isdir(_UpperCAmelCase\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t# read the file\r\n\t\t\t\t\t\t\t\tif filename in [\"failures_line.txt\", \"summary_short.txt\", \"job_name.txt\"]:\r\n\t\t\t\t\t\t\t\t\t\twith z.open(_UpperCAmelCase\t\t\t\t\t\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tfor line in f:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\tline.decode('''UTF-8'''\t\t\t\t\t\t).strip()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif filename == \"failures_line.txt\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# `error_line` is the place where `error` occurs\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\tline[: line.index(''': 
'''\t\t\t\t\t\t)]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t=\tline[line.index(''': '''\t\t\t\t\t\t) + len(''': '''\t\t\t\t\t\t) :]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\terrors.append([error_line, error]\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texcept Exception:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# skip un-related lines\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\telif filename == \"summary_short.txt\" and line.startswith('''FAILED '''\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# `test` is the test method that failed\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\tline[len('''FAILED '''\t\t\t\t\t\t) :]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfailed_tests.append(_UpperCAmelCase\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\telif filename == \"job_name.txt\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\tline\r\n\r\n\t\tif len(_UpperCAmelCase\t\t\t\t\t\t) != len(_UpperCAmelCase\t\t\t\t\t\t):\r\n\t\t\t\traise ValueError(\r\n\t\t\t\t f'`errors` and `failed_tests` should have the same number of elements. Got {len(_UpperCAmelCase\t\t\t\t\t\t)} for `errors` '\r\n\t\t\t\t f'and {len(_UpperCAmelCase\t\t\t\t\t\t)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'\r\n\t\t\t\t ''' problem.'''\t\t\t\t\t\t)\r\n\r\n\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\tNone\r\n\t\tif job_name and job_links:\r\n\t\t\t\t__UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t=\tjob_links.get(_UpperCAmelCase , _UpperCAmelCase\t\t\t\t\t\t)\r\n\r\n\t\t# A list with elements of the form (line of error, error, failed test)\r\n\t\t__UpperCAmelCase : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[x + [y] + [job_link] for x, y in zip(_UpperCAmelCase , _UpperCAmelCase\t\t\t\t\t\t)]\r\n\r\n\t\treturn result\r\n\r\ndef a\t\t\t\t\t\t( _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tDict , _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tTuple=None\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[]\r\n\r\n\t\t__UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[os.path.join(_UpperCAmelCase , _UpperCAmelCase\t\t\t\t\t\t) for p in os.listdir(_UpperCAmelCase\t\t\t\t\t\t) if p.endswith('''.zip'''\t\t\t\t\t\t)]\r\n\t\tfor p in paths:\r\n\t\t\t\terrors.extend(get_errors_from_single_artifact(_UpperCAmelCase , job_links=_UpperCAmelCase\t\t\t\t\t\t)\t\t\t\t\t\t)\r\n\r\n\t\treturn errors\r\n\r\ndef a\t\t\t\t\t\t( _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tstr , _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tOptional[Any]=None\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tCounter()\r\n\t\tcounter.update([x[1] for x in logs]\t\t\t\t\t\t)\r\n\t\t__UpperCAmelCase : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t=\tcounter.most_common()\r\n\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{}\r\n\t\tfor error, count in counts:\r\n\t\t\t\tif error_filter is None or error not in error_filter:\r\n\t\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}\r\n\r\n\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tdict(sorted(r.items() , key=lambda _UpperCAmelCase\t\t\t\t\t\t: item[1][\"count\"] , reverse=_UpperCAmelCase\t\t\t\t\t\t)\t\t\t\t\t\t)\r\n\t\treturn r\r\n\r\ndef a\t\t\t\t\t\t( 
_UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tDict\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\ttest.split('''::'''\t\t\t\t\t\t)[0]\r\n\t\tif test.startswith('''tests/models/'''\t\t\t\t\t\t):\r\n\t\t\t\t__UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t=\ttest.split('''/'''\t\t\t\t\t\t)[2]\r\n\t\telse:\r\n\t\t\t\t__UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tNone\r\n\r\n\t\treturn test\r\n\r\ndef a\t\t\t\t\t\t( _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tOptional[int] , _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tint=None\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[(x[0], x[1], get_model(x[2]\t\t\t\t\t\t)) for x in logs]\r\n\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[x for x in logs if x[2] is not None]\r\n\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{x[2] for x in logs}\r\n\r\n\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{}\r\n\t\tfor test in tests:\r\n\t\t\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tCounter()\r\n\t\t\t\t# count by errors in `test`\r\n\t\t\t\tcounter.update([x[1] for x in logs if x[2] == test]\t\t\t\t\t\t)\r\n\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\tcounter.most_common()\r\n\t\t\t\t__UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{error: count for error, count in counts if (error_filter is None or error not in error_filter)}\r\n\t\t\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tsum(error_counts.values()\t\t\t\t\t\t)\r\n\t\t\t\tif n_errors > 0:\r\n\t\t\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\t{'''count''': n_errors, '''errors''': error_counts}\r\n\r\n\t\t__UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t=\tdict(sorted(r.items() , key=lambda _UpperCAmelCase\t\t\t\t\t\t: item[1][\"count\"] , reverse=_UpperCAmelCase\t\t\t\t\t\t)\t\t\t\t\t\t)\r\n\t\treturn r\r\n\r\ndef a\t\t\t\t\t\t( _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t=\t'''| no. | error | status |'''\r\n\t\t__UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t'''|-:|:-|:-|'''\r\n\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[header, sep]\r\n\t\tfor error in reduced_by_error:\r\n\t\t\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\treduced_by_error[error]['''count''']\r\n\t\t\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\tf'| {count} | {error[:1_00]} | |'\r\n\t\t\t\tlines.append(_UpperCAmelCase\t\t\t\t\t\t)\r\n\r\n\t\treturn \"\\n\".join(_UpperCAmelCase\t\t\t\t\t\t)\r\n\r\ndef a\t\t\t\t\t\t( _UpperCAmelCase\t\t\t\t\t\t\t:\t\t\t\t\tDict\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t'''| model | no. 
of errors | major error | count |'''\r\n\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\t'''|-:|-:|-:|-:|'''\r\n\t\t__UpperCAmelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t=\t[header, sep]\r\n\t\tfor model in reduced_by_model:\r\n\t\t\t\t__UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t=\treduced_by_model[model]['''count''']\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t\t\t__UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t=\tlist(reduced_by_model[model]['''errors'''].items()\t\t\t\t\t\t)[0]\r\n\t\t\t\t__UpperCAmelCase : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t=\tf'| {model} | {count} | {error[:60]} | {_count} |'\r\n\t\t\t\tlines.append(_UpperCAmelCase\t\t\t\t\t\t)\r\n\r\n\t\treturn \"\\n\".join(_UpperCAmelCase\t\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__A \t\t\t=argparse.ArgumentParser()\r\n\t\t\t\t# Required parameters\r\n\t\t\t\tparser.add_argument(\"--workflow_run_id\", type=str, required=True, help=\"A GitHub Actions workflow run id.\")\r\n\t\t\t\tparser.add_argument(\r\n\t\t\t\t \"--output_dir\",\r\n\t\t\t\t type=str,\r\n\t\t\t\t required=True,\r\n\t\t\t\t help=\"Where to store the downloaded artifacts and other result files.\",\r\n\t\t\t\t)\r\n\t\t\t\tparser.add_argument(\"--token\", default=None, type=str, help=\"A token that has actions:read permission.\")\r\n\t\t\t\t__A \t\t\t=parser.parse_args()\r\n\r\n\t\t\t\tos.makedirs(args.output_dir, exist_ok=True)\r\n\r\n\t\t\t\t__A \t\t\t=get_job_links(args.workflow_run_id, token=args.token)\r\n\t\t\t\t__A \t\t\t={}\r\n\t\t\t\t# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.\r\n\t\t\t\t# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.\r\n\t\t\t\tif _job_links:\r\n\t\t\t\t\t\t\t\tfor k, v in _job_links.items():\r\n\t\t\t\t\t\t\t\t\t\t\t\t# This is how GitHub actions combine job names.\r\n\t\t\t\t\t\t\t\t\t\t\t\tif \" / \" in k:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__A \t\t\t=k.find(\" / \")\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__A \t\t\t=k[index + len(\" / \") :]\r\n\t\t\t\t\t\t\t\t\t\t\t\t__A \t\t\t=v\r\n\t\t\t\twith open(os.path.join(args.output_dir, \"job_links.json\"), \"w\", encoding=\"UTF-8\") as fp:\r\n\t\t\t\t\t\t\t\tjson.dump(job_links, fp, ensure_ascii=False, indent=4)\r\n\r\n\t\t\t\t__A \t\t\t=get_artifacts_links(args.workflow_run_id, token=args.token)\r\n\t\t\t\twith open(os.path.join(args.output_dir, \"artifacts.json\"), \"w\", encoding=\"UTF-8\") as fp:\r\n\t\t\t\t\t\t\t\tjson.dump(artifacts, fp, ensure_ascii=False, indent=4)\r\n\r\n\t\t\t\tfor idx, (name, url) in enumerate(artifacts.items()):\r\n\t\t\t\t\t\t\t\tdownload_artifact(name, url, args.output_dir, args.token)\r\n\t\t\t\t\t\t\t\t# Be gentle to GitHub\r\n\t\t\t\t\t\t\t\ttime.sleep(1)\r\n\r\n\t\t\t\t__A \t\t\t=get_all_errors(args.output_dir, job_links=job_links)\r\n\r\n\t\t\t\t# `e[1]` is the error\r\n\t\t\t\t__A \t\t\t=Counter()\r\n\t\t\t\tcounter.update([e[1] for e in errors])\r\n\r\n\t\t\t\t# print the top 30 most common test errors\r\n\t\t\t\t__A \t\t\t=counter.most_common(3_0)\r\n\t\t\t\tfor item in most_common:\r\n\t\t\t\t\t\t\t\tprint(item)\r\n\r\n\t\t\t\twith open(os.path.join(args.output_dir, \"errors.json\"), \"w\", encoding=\"UTF-8\") as fp:\r\n\t\t\t\t\t\t\t\tjson.dump(errors, fp, ensure_ascii=False, indent=4)\r\n\r\n\t\t\t\t__A \t\t\t=reduce_by_error(errors)\r\n\t\t\t\t__A \t\t\t=reduce_by_model(errors)\r\n\r\n\t\t\t\t__A \t\t\t=make_github_table(reduced_by_error)\r\n\t\t\t\t__A 
\t\t\t=make_github_table_per_model(reduced_by_model)\r\n\r\n\t\t\t\twith open(os.path.join(args.output_dir, \"reduced_by_error.txt\"), \"w\", encoding=\"UTF-8\") as fp:\r\n\t\t\t\t\t\t\t\tfp.write(sa)\r\n\t\t\t\twith open(os.path.join(args.output_dir, \"reduced_by_model.txt\"), \"w\", encoding=\"UTF-8\") as fp:\r\n\t\t\t\t\t\t\t\tfp.write(sa)\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":226,"string":"226"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":789,"cells":{"code":{"kind":"string","value":"\r\n\r\nfrom ...utils import is_note_seq_available, is_transformers_available, is_torch_available\r\nfrom ...utils import OptionalDependencyNotAvailable\r\n\r\n\r\ntry:\r\n if not (is_transformers_available() and is_torch_available()):\r\n raise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n from ...utils.dummy_torch_and_transformers_objects import * # noqa F403\r\nelse:\r\n from .notes_encoder import SpectrogramNotesEncoder\r\n from .continous_encoder import SpectrogramContEncoder\r\n from .pipeline_spectrogram_diffusion import (\r\n SpectrogramContEncoder,\r\n SpectrogramDiffusionPipeline,\r\n TaFilmDecoder,\r\n )\r\n\r\ntry:\r\n if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):\r\n raise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403\r\nelse:\r\n from .midi_utils import MidiProcessor\r\n"},"code_codestyle":{"kind":"number","value":60,"string":"60"},"style_context":{"kind":"string","value":"\r\n\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom transformers.testing_utils import require_pytesseract, require_torch\r\nfrom transformers.utils import is_pytesseract_available, is_torch_available\r\n\r\nfrom ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs\r\n\r\n\r\nif is_torch_available():\r\n import torch\r\n\r\nif is_pytesseract_available():\r\n from PIL import Image\r\n\r\n from transformers import LayoutLMvaImageProcessor\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t__magic_name__\t\t\t( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __init__(\t\t\t\t\t\t\tself\t\t\t\t\t\t,\t\t_lowercase\t\t\t\t\t\t,\t\t_lowercase=7\t\t\t\t\t\t,\t\t_lowercase=3\t\t\t\t\t\t,\t\t_lowercase=18\t\t\t\t\t\t,\t\t_lowercase=30\t\t\t\t\t\t,\t\t_lowercase=400\t\t\t\t\t\t,\t\t_lowercase=True\t\t\t\t\t\t,\t\t_lowercase=None\t\t\t\t\t\t,\t\t_lowercase=True\t\t\t\t\t\t,\t\t)-> Optional[int]:\r\n UpperCamelCase_\t =\t\tsize if size is not None else {\"height\": 18, \"width\": 18}\r\n UpperCamelCase_\t =\t\tparent\r\n UpperCamelCase_\t =\t\tbatch_size\r\n UpperCamelCase_\t =\t\tnum_channels\r\n UpperCamelCase_\t =\t\timage_size\r\n UpperCamelCase_\t =\t\tmin_resolution\r\n UpperCamelCase_\t =\t\tmax_resolution\r\n UpperCamelCase_\t =\t\tdo_resize\r\n UpperCamelCase_\t =\t\tsize\r\n UpperCamelCase_\t =\t\tapply_ocr\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> str:\r\n return {\"do_resize\": self.do_resize, \"size\": self.size, \"apply_ocr\": self.apply_ocr}\r\n\r\n\r\n\r\n\r\n\r\n\r\n@require_torch\r\n@require_pytesseract\r\nclass \t\t__magic_name__\t\t\t( snake_case\t, unittest.TestCase\t\t\t\t\t\t\t):\r\n UpperCamelCase_\t\t\t\t\t:Union[str, Any]\t\t\t\t\t\t\t\t\t= LayoutLMvaImageProcessor if is_pytesseract_available() else None\r\n\r\n\r\n\r\n\r\n\r\n\r\n def 
\t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> Any:\r\n UpperCamelCase_\t =\t\tLayoutLMvaImageProcessingTester(self\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n @property\r\n def \t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> Optional[Any]:\r\n return self.image_processor_tester.prepare_image_processor_dict()\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> Dict:\r\n UpperCamelCase_\t =\t\tself.image_processing_class(**self.image_processor_dict\t\t\t\t\t)\r\n self.assertTrue(hasattr(_lowercase\t\t\t\t\t\t,\t\t\"do_resize\"\t\t\t\t\t)\t\t\t\t\t)\r\n self.assertTrue(hasattr(_lowercase\t\t\t\t\t\t,\t\t\"size\"\t\t\t\t\t)\t\t\t\t\t)\r\n self.assertTrue(hasattr(_lowercase\t\t\t\t\t\t,\t\t\"apply_ocr\"\t\t\t\t\t)\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> List[Any]:\r\n UpperCamelCase_\t =\t\tself.image_processing_class.from_dict(self.image_processor_dict\t\t\t\t\t)\r\n self.assertEqual(image_processor.size\t\t\t\t\t\t,\t\t{\"height\": 18, \"width\": 18}\t\t\t\t\t)\r\n\r\n UpperCamelCase_\t =\t\tself.image_processing_class.from_dict(self.image_processor_dict\t\t\t\t\t\t,\t\tsize=42\t\t\t\t\t)\r\n self.assertEqual(image_processor.size\t\t\t\t\t\t,\t\t{\"height\": 42, \"width\": 42}\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> Any:\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> List[str]:\r\n # Initialize image_processing\r\n UpperCamelCase_\t =\t\tself.image_processing_class(**self.image_processor_dict\t\t\t\t\t)\r\n # create random PIL images\r\n UpperCamelCase_\t =\t\tprepare_image_inputs(self.image_processor_tester\t\t\t\t\t\t,\t\tequal_resolution=_lowercase\t\t\t\t\t)\r\n for image in image_inputs:\r\n self.assertIsInstance(_lowercase\t\t\t\t\t\t,\t\tImage.Image\t\t\t\t\t)\r\n\r\n # Test not batched input\r\n UpperCamelCase_\t =\t\timage_processing(image_inputs[0]\t\t\t\t\t\t,\t\treturn_tensors=\"pt\"\t\t\t\t\t)\r\n self.assertEqual(\r\n encoding.pixel_values.shape\t\t\t\t\t\t,\t\t(\r\n 1,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.size[\"height\"],\r\n self.image_processor_tester.size[\"width\"],\r\n )\t\t\t\t\t\t,\t\t)\r\n\r\n self.assertIsInstance(encoding.words\t\t\t\t\t\t,\t\t_lowercase\t\t\t\t\t)\r\n self.assertIsInstance(encoding.boxes\t\t\t\t\t\t,\t\t_lowercase\t\t\t\t\t)\r\n\r\n # Test batched\r\n UpperCamelCase_\t =\t\timage_processing(_lowercase\t\t\t\t\t\t,\t\treturn_tensors=\"pt\"\t\t\t\t\t).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t\t\t\t,\t\t(\r\n self.image_processor_tester.batch_size,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.size[\"height\"],\r\n self.image_processor_tester.size[\"width\"],\r\n )\t\t\t\t\t\t,\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> str:\r\n # Initialize image_processing\r\n UpperCamelCase_\t =\t\tself.image_processing_class(**self.image_processor_dict\t\t\t\t\t)\r\n # create random numpy tensors\r\n UpperCamelCase_\t =\t\tprepare_image_inputs(self.image_processor_tester\t\t\t\t\t\t,\t\tequal_resolution=_lowercase\t\t\t\t\t\t,\t\tnumpify=_lowercase\t\t\t\t\t)\r\n for image in image_inputs:\r\n self.assertIsInstance(_lowercase\t\t\t\t\t\t,\t\tnp.ndarray\t\t\t\t\t)\r\n\r\n # Test not batched 
input\r\n UpperCamelCase_\t =\t\timage_processing(image_inputs[0]\t\t\t\t\t\t,\t\treturn_tensors=\"pt\"\t\t\t\t\t).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t\t\t\t,\t\t(\r\n 1,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.size[\"height\"],\r\n self.image_processor_tester.size[\"width\"],\r\n )\t\t\t\t\t\t,\t\t)\r\n\r\n # Test batched\r\n UpperCamelCase_\t =\t\timage_processing(_lowercase\t\t\t\t\t\t,\t\treturn_tensors=\"pt\"\t\t\t\t\t).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t\t\t\t,\t\t(\r\n self.image_processor_tester.batch_size,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.size[\"height\"],\r\n self.image_processor_tester.size[\"width\"],\r\n )\t\t\t\t\t\t,\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> List[str]:\r\n # Initialize image_processing\r\n UpperCamelCase_\t =\t\tself.image_processing_class(**self.image_processor_dict\t\t\t\t\t)\r\n # create random PyTorch tensors\r\n UpperCamelCase_\t =\t\tprepare_image_inputs(self.image_processor_tester\t\t\t\t\t\t,\t\tequal_resolution=_lowercase\t\t\t\t\t\t,\t\ttorchify=_lowercase\t\t\t\t\t)\r\n for image in image_inputs:\r\n self.assertIsInstance(_lowercase\t\t\t\t\t\t,\t\ttorch.Tensor\t\t\t\t\t)\r\n\r\n # Test not batched input\r\n UpperCamelCase_\t =\t\timage_processing(image_inputs[0]\t\t\t\t\t\t,\t\treturn_tensors=\"pt\"\t\t\t\t\t).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t\t\t\t,\t\t(\r\n 1,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.size[\"height\"],\r\n self.image_processor_tester.size[\"width\"],\r\n )\t\t\t\t\t\t,\t\t)\r\n\r\n # Test batched\r\n UpperCamelCase_\t =\t\timage_processing(_lowercase\t\t\t\t\t\t,\t\treturn_tensors=\"pt\"\t\t\t\t\t).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t\t\t\t,\t\t(\r\n self.image_processor_tester.batch_size,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.size[\"height\"],\r\n self.image_processor_tester.size[\"width\"],\r\n )\t\t\t\t\t\t,\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t\t)-> Any:\r\n # with apply_OCR = True\r\n UpperCamelCase_\t =\t\tLayoutLMvaImageProcessor()\r\n\r\n from datasets import load_dataset\r\n\r\n UpperCamelCase_\t =\t\tload_dataset(\"hf-internal-testing/fixtures_docvqa\"\t\t\t\t\t\t,\t\tsplit=\"test\"\t\t\t\t\t)\r\n\r\n UpperCamelCase_\t =\t\tImage.open(ds[0][\"file\"]\t\t\t\t\t).convert(\"RGB\"\t\t\t\t\t)\r\n\r\n UpperCamelCase_\t =\t\timage_processing(_lowercase\t\t\t\t\t\t,\t\treturn_tensors=\"pt\"\t\t\t\t\t)\r\n\r\n self.assertEqual(encoding.pixel_values.shape\t\t\t\t\t\t,\t\t(1, 3, 224, 224)\t\t\t\t\t)\r\n self.assertEqual(len(encoding.words\t\t\t\t\t)\t\t\t\t\t\t,\t\tlen(encoding.boxes\t\t\t\t\t)\t\t\t\t\t)\r\n\r\n # fmt: off\r\n # the words and boxes were obtained with Tesseract 4.1.1\r\n UpperCamelCase_\t =\t\t[[\"11:14\", \"to\", \"11:39\", \"a.m\", \"11:39\", \"to\", \"11:44\", \"a.m.\", \"11:44\", \"a.m.\", \"to\", \"12:25\", \"p.m.\", \"12:25\", \"to\", \"12:58\", \"p.m.\", \"12:58\", \"to\", \"4:00\", \"p.m.\", \"2:00\", \"to\", \"5:00\", \"p.m.\", \"Coffee\", \"Break\", \"Coffee\", \"will\", \"be\", \"served\", \"for\", \"men\", \"and\", \"women\", \"in\", \"the\", \"lobby\", \"adjacent\", \"to\", \"exhibit\", \"area.\", \"Please\", \"move\", \"into\", \"exhibit\", \"area.\", \"(Exhibits\", \"Open)\", \"TRRF\", 
\"GENERAL\", \"SESSION\", \"(PART\", \"|)\", \"Presiding:\", \"Lee\", \"A.\", \"Waller\", \"TRRF\", \"Vice\", \"President\", \"“Introductory\", \"Remarks”\", \"Lee\", \"A.\", \"Waller,\", \"TRRF\", \"Vice\", \"Presi-\", \"dent\", \"Individual\", \"Interviews\", \"with\", \"TRRF\", \"Public\", \"Board\", \"Members\", \"and\", \"Sci-\", \"entific\", \"Advisory\", \"Council\", \"Mem-\", \"bers\", \"Conducted\", \"by\", \"TRRF\", \"Treasurer\", \"Philip\", \"G.\", \"Kuehn\", \"to\", \"get\", \"answers\", \"which\", \"the\", \"public\", \"refrigerated\", \"warehousing\", \"industry\", \"is\", \"looking\", \"for.\", \"Plus\", \"questions\", \"from\", \"the\", \"floor.\", \"Dr.\", \"Emil\", \"M.\", \"Mrak,\", \"University\", \"of\", \"Cal-\", \"ifornia,\", \"Chairman,\", \"TRRF\", \"Board;\", \"Sam\", \"R.\", \"Cecil,\", \"University\", \"of\", \"Georgia\", \"College\", \"of\", \"Agriculture;\", \"Dr.\", \"Stanley\", \"Charm,\", \"Tufts\", \"University\", \"School\", \"of\", \"Medicine;\", \"Dr.\", \"Robert\", \"H.\", \"Cotton,\", \"ITT\", \"Continental\", \"Baking\", \"Company;\", \"Dr.\", \"Owen\", \"Fennema,\", \"University\", \"of\", \"Wis-\", \"consin;\", \"Dr.\", \"Robert\", \"E.\", \"Hardenburg,\", \"USDA.\", \"Questions\", \"and\", \"Answers\", \"Exhibits\", \"Open\", \"Capt.\", \"Jack\", \"Stoney\", \"Room\", \"TRRF\", \"Scientific\", \"Advisory\", \"Council\", \"Meeting\", \"Ballroom\", \"Foyer\"]] # noqa: E231\r\n UpperCamelCase_\t =\t\t[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 
453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231\r\n # fmt: on\r\n\r\n self.assertListEqual(encoding.words\t\t\t\t\t\t,\t\t_lowercase\t\t\t\t\t)\r\n self.assertListEqual(encoding.boxes\t\t\t\t\t\t,\t\t_lowercase\t\t\t\t\t)\r\n\r\n # with apply_OCR = False\r\n UpperCamelCase_\t =\t\tLayoutLMvaImageProcessor(apply_ocr=_lowercase\t\t\t\t\t)\r\n\r\n UpperCamelCase_\t =\t\timage_processing(_lowercase\t\t\t\t\t\t,\t\treturn_tensors=\"pt\"\t\t\t\t\t)\r\n\r\n self.assertEqual(encoding.pixel_values.shape\t\t\t\t\t\t,\t\t(1, 3, 224, 224)\t\t\t\t\t)\r\n"},"style_context_codestyle":{"kind":"number","value":60,"string":"60"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":790,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nimport sys\r\nimport tempfile\r\nimport unittest\r\nimport unittest.mock as mock\r\nfrom pathlib import Path\r\n\r\nfrom huggingface_hub import HfFolder, delete_repo\r\nfrom huggingface_hub.file_download import http_get\r\nfrom requests.exceptions import HTTPError\r\n\r\nfrom transformers import (\r\n AlbertTokenizer,\r\n AutoTokenizer,\r\n BertTokenizer,\r\n BertTokenizerFast,\r\n GPTaTokenizerFast,\r\n is_tokenizers_available,\r\n)\r\nfrom transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers\r\nfrom transformers.tokenization_utils import Trie\r\n\r\n\r\nsys.path.append(str(Path(__file__).parent.parent / \"\"\"utils\"\"\"))\r\n\r\nfrom test_module.custom_tokenization import CustomTokenizer # noqa E402\r\n\r\n\r\nif is_tokenizers_available():\r\n from test_module.custom_tokenization_fast import CustomTokenizerFast\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass UpperCamelCase__ (\tunittest.TestCase ):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: List[Any] ):\r\n # A mock response for an HTTP head request 
to emulate server down.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check makes sure the fake head request was actually called.
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2")
            # This check makes sure the fake head request was actually called.
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5.
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are
        # in the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder, to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, while tiny GPT2 has a vocab size of 1000.
            self.assertEqual(tokenizer.vocab_size, 1000)
            # The tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5.
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")

@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer =
BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )\r\n self.assertDictEqual(new_tokenizer.vocab ,\t\t\ttokenizer.vocab )\r\n\r\n\r\n\r\n\r\n\r\n @require_tokenizers\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: str ):\r\n CustomTokenizer.register_for_auto_class()\r\n with tempfile.TemporaryDirectory() as tmp_dir:\r\n lowerCAmelCase_ :\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t\t= os.path.join(__UpperCamelCase ,\t\t\t'vocab.txt' )\r\n with open(__UpperCamelCase ,\t\t\t'w' ,\t\t\tencoding='utf-8' ) as vocab_writer:\r\n vocab_writer.write(''.join([x + '\\n' for x in self.vocab_tokens] ) )\r\n lowerCAmelCase_ :\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t\t= CustomTokenizer(__UpperCamelCase )\r\n\r\n # No fast custom tokenizer\r\n tokenizer.push_to_hub('test-dynamic-tokenizer' ,\t\t\tuse_auth_token=self._token )\r\n\r\n lowerCAmelCase_ :\t\t\t\tstr\t\t\t\t\t\t\t\t= AutoTokenizer.from_pretrained(F\"{USER}/test-dynamic-tokenizer\" ,\t\t\ttrust_remote_code=__UpperCamelCase )\r\n # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module\r\n self.assertEqual(tokenizer.__class__.__name__ ,\t\t\t'CustomTokenizer' )\r\n\r\n # Fast and slow custom tokenizer\r\n CustomTokenizerFast.register_for_auto_class()\r\n with tempfile.TemporaryDirectory() as tmp_dir:\r\n lowerCAmelCase_ :\t\t\t\tList[str]\t\t\t\t\t\t\t\t= os.path.join(__UpperCamelCase ,\t\t\t'vocab.txt' )\r\n with open(__UpperCamelCase ,\t\t\t'w' ,\t\t\tencoding='utf-8' ) as vocab_writer:\r\n vocab_writer.write(''.join([x + '\\n' for x in self.vocab_tokens] ) )\r\n\r\n lowerCAmelCase_ :\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t= BertTokenizerFast.from_pretrained(__UpperCamelCase )\r\n bert_tokenizer.save_pretrained(__UpperCamelCase )\r\n lowerCAmelCase_ :\t\t\t\tint\t\t\t\t\t\t\t\t= CustomTokenizerFast.from_pretrained(__UpperCamelCase )\r\n\r\n tokenizer.push_to_hub('test-dynamic-tokenizer' ,\t\t\tuse_auth_token=self._token )\r\n\r\n lowerCAmelCase_ :\t\t\t\tList[Any]\t\t\t\t\t\t\t\t= AutoTokenizer.from_pretrained(F\"{USER}/test-dynamic-tokenizer\" ,\t\t\ttrust_remote_code=__UpperCamelCase )\r\n # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module\r\n self.assertEqual(tokenizer.__class__.__name__ ,\t\t\t'CustomTokenizerFast' )\r\n lowerCAmelCase_ :\t\t\t\tTuple\t\t\t\t\t\t\t\t= AutoTokenizer.from_pretrained(\r\n F\"{USER}/test-dynamic-tokenizer\" ,\t\t\tuse_fast=__UpperCamelCase ,\t\t\ttrust_remote_code=__UpperCamelCase )\r\n # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module\r\n self.assertEqual(tokenizer.__class__.__name__ ,\t\t\t'CustomTokenizer' )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass UpperCamelCase__ (\tunittest.TestCase ):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: str ):\r\n lowerCAmelCase_ :\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t= Trie()\r\n trie.add('Hello 友達' )\r\n self.assertEqual(trie.data ,\t\t\t{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )\r\n trie.add('Hello' )\r\n trie.data\r\n self.assertEqual(trie.data ,\t\t\t{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )\r\n\r\n\r\n\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: Dict ):\r\n lowerCAmelCase_ :\t\t\t\tstr\t\t\t\t\t\t\t\t= Trie()\r\n self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,\t\t\t['[CLS] This is a extra_id_100'] )\r\n trie.add('[CLS]' )\r\n trie.add('extra_id_1' )\r\n 
trie.add('extra_id_100' )\r\n self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,\t\t\t['[CLS]', ' This is a ', 'extra_id_100'] )\r\n\r\n\r\n\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: str ):\r\n lowerCAmelCase_ :\t\t\t\tDict\t\t\t\t\t\t\t\t= Trie()\r\n trie.add('A' )\r\n self.assertEqual(trie.split('ABC' ) ,\t\t\t['A', 'BC'] )\r\n self.assertEqual(trie.split('BCA' ) ,\t\t\t['BC', 'A'] )\r\n\r\n\r\n\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: Any ):\r\n lowerCAmelCase_ :\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t= Trie()\r\n trie.add('TOKEN]' )\r\n trie.add('[SPECIAL_TOKEN]' )\r\n self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,\t\t\t['This is something ', '[SPECIAL_TOKEN]'] )\r\n\r\n\r\n\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: List[str] ):\r\n lowerCAmelCase_ :\t\t\t\tOptional[Any]\t\t\t\t\t\t\t\t= Trie()\r\n trie.add('A' )\r\n trie.add('P' )\r\n trie.add('[SPECIAL_TOKEN]' )\r\n self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,\t\t\t['This is something ', '[SPECIAL_TOKEN]'] )\r\n\r\n\r\n\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: Tuple ):\r\n lowerCAmelCase_ :\t\t\t\tList[str]\t\t\t\t\t\t\t\t= Trie()\r\n trie.add('AB' )\r\n trie.add('B' )\r\n trie.add('C' )\r\n self.assertEqual(trie.split('ABC' ) ,\t\t\t['AB', 'C'] )\r\n\r\n\r\n\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: str ):\r\n lowerCAmelCase_ :\t\t\t\tint\t\t\t\t\t\t\t\t= Trie()\r\n trie.add('ABC' )\r\n trie.add('B' )\r\n trie.add('CD' )\r\n self.assertEqual(trie.split('ABCD' ) ,\t\t\t['ABC', 'D'] )\r\n\r\n\r\n\r\n\r\n\r\n def SCREAMING_SNAKE_CASE__ ( self\t\t\t\t: Dict ):\r\n # Even if the offsets are wrong, we necessarily output correct string\r\n # parts.\r\n lowerCAmelCase_ :\t\t\t\tOptional[int]\t\t\t\t\t\t\t\t= Trie()\r\n lowerCAmelCase_ :\t\t\t\tAny\t\t\t\t\t\t\t\t= trie.cut_text('ABC' ,\t\t\t[0, 0, 2, 1, 2, 3] )\r\n self.assertEqual(__UpperCamelCase ,\t\t\t['AB', 'C'] )\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":224,"string":"224"},"style_context":{"kind":"string","value":"\r\r\r\r\r\r\"\"\"simple docstring\"\"\"\rimport random\rimport unittest\r\rimport numpy as np\r\rfrom diffusers import (\r DPMSolverMultistepScheduler,\r EulerAncestralDiscreteScheduler,\r EulerDiscreteScheduler,\r LMSDiscreteScheduler,\r OnnxStableDiffusionImgaImgPipeline,\r PNDMScheduler,\r)\rfrom diffusers.utils import floats_tensor\rfrom diffusers.utils.testing_utils import (\r is_onnx_available,\r load_image,\r nightly,\r require_onnxruntime,\r require_torch_gpu,\r)\r\rfrom ..test_pipelines_onnx_common import OnnxPipelineTesterMixin\r\r\rif is_onnx_available():\r\t\timport onnxruntime as ort\r\r\r\r\r\r\rclass \t\t\t\t\t_UpperCAmelCase (\tlowercase_ ,\t\t\t\t\t\t\tunittest.TestCase ):\r\t\t\t\t\tUpperCamelCase =\t\t\t\t\t\t\t'''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''\r\r\r\t\t\t\t\tdef \tlowerCamelCase (\t\t\t\t\tself\t\t\t:Optional[int] , __UpperCamelCase\t\t\t:Union[str, Any]=0\t\t):\r\t\t\t\t\t\t\t\t\t\t\t\tA\t\t= floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase\t\t)\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\tA\t\t= np.random.RandomState(__UpperCamelCase\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\tA\t\t= {\r\t\t\t\t\t\t\t\t\t\t\t\t \"prompt\": \"A painting of a squirrel eating a burger\",\r\t\t\t\t\t\t\t\t\t\t\t\t \"image\": image,\r\t\t\t\t\t\t\t\t\t\t\t\t \"generator\": generator,\r\t\t\t\t\t\t\t\t\t\t\t\t \"num_inference_steps\": 3,\r\t\t\t\t\t\t\t\t\t\t\t\t \"strength\": 0.75,\r\t\t\t\t\t\t\t\t\t\t\t\t \"guidance_scale\": 
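
# A standalone illustration of the `Trie.split` semantics exercised by `TrieTest` above:
# the returned parts always concatenate back to the input string, with known tokens split
# out as their own segments. This is a minimal sketch; it assumes `Trie` is importable
# from `transformers.tokenization_utils`, where the class under test lives.
def trie_split_example():
    from transformers.tokenization_utils import Trie

    trie = Trie()
    for token in ("[CLS]", "extra_id_1", "extra_id_100"):
        trie.add(token)
    parts = trie.split("[CLS] This is a extra_id_100")
    # The split is lossless: joining the parts reproduces the input.
    assert "".join(parts) == "[CLS] This is a extra_id_100"
    return parts  # ['[CLS]', ' This is a ', 'extra_id_100']
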
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
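
# For context, the same pipeline can be driven outside of unittest. This is a minimal
# sketch, assuming the "onnx" revision of the checkpoint used by the integration tests
# above is available and that onnxruntime's CPU provider is installed; the prompt and
# input image are illustrative.
def run_img2img_example():
    import numpy as np
    from diffusers import OnnxStableDiffusionImg2ImgPipeline
    from diffusers.utils import load_image

    pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
    )
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((768, 512))
    generator = np.random.RandomState(0)  # fixed seed for reproducibility
    result = pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=init_image,
        strength=0.75,
        guidance_scale=7.5,
        num_inference_steps=10,
        generator=generator,
        output_type="np",
    )
    return result.images[0]
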
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
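
# A short sketch of how the three configs above compose, using only their defaults:
def build_default_pix2struct_config() -> Pix2StructConfig:
    text_config = Pix2StructTextConfig()
    vision_config = Pix2StructVisionConfig()
    config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
    # The composite config mirrors the text config's special token ids.
    assert config.decoder_start_token_id == text_config.decoder_start_token_id
    assert config.to_dict()["model_type"] == "pix2struct"
    return config
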
from ..utils import DummyObject, requires_backends


# The upstream module defines one identical dummy class per sentencepiece-backed
# tokenizer, raising a helpful error when `sentencepiece` is not installed. The
# individual class names are not recoverable from this copy (roughly thirty identical,
# name-stripped definitions), so a single representative placeholder is kept below.
class DummySentencePieceObject(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features "
                    f"'{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
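
# The builder above is what backs `load_dataset("parquet", ...)`. A minimal usage
# sketch; the file name is a placeholder for a local parquet file:
def load_parquet_example():
    from datasets import load_dataset

    ds = load_dataset("parquet", data_files={"train": "train.parquet"})
    # Features were either supplied via the config or inferred from the arrow schema.
    return ds["train"].features
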
"""Convert T5X checkpoints from the original repository to PyTorch."""
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the K, O, Q, V parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
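
# Typical invocation of the converter above; the script name and all paths are
# placeholders for illustration:
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --scalable_attention
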
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value: int) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
img_cols),\r\t\t\t get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),\r\t\t\t]\r\r\t\t\t# plot different image rotations\r\t\t\t_lowerCamelCase\t: Any = plt.figure(1)\r\t\t\t_lowerCamelCase\t: List[Any] = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']\r\t\t\tfor i, image in enumerate(images):\r\t\t\t\t\t\tplt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')\r\t\t\t\t\t\tplt.title(titles[i])\r\t\t\t\t\t\tplt.axis('''off''')\r\t\t\t\t\t\tplt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)\r\t\t\tplt.show()"},"style_context_codestyle":{"kind":"number","value":130,"string":"130"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":794,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom transformers.testing_utils import require_torch, require_vision\r\nfrom transformers.utils import is_torch_available, is_vision_available\r\n\r\nfrom ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs\r\n\r\n\r\nif is_torch_available():\r\n import torch\r\n\r\nif is_vision_available():\r\n from PIL import Image\r\n\r\n from transformers import LevitImageProcessor\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t\t\ta_\t\t\t\t\t\t(\t\t\tunittest.TestCase\t\t\t\t):\r\n\r\n def __init__( self\t\t\t,\t\t_SCREAMING_SNAKE_CASE\t\t\t,\t\t_SCREAMING_SNAKE_CASE=7\t\t\t,\t\t_SCREAMING_SNAKE_CASE=3\t\t\t,\t\t_SCREAMING_SNAKE_CASE=18\t\t\t,\t\t_SCREAMING_SNAKE_CASE=30\t\t\t,\t\t_SCREAMING_SNAKE_CASE=400\t\t\t,\t\t_SCREAMING_SNAKE_CASE=True\t\t\t,\t\t_SCREAMING_SNAKE_CASE=None\t\t\t,\t\t_SCREAMING_SNAKE_CASE=True\t\t\t,\t\t_SCREAMING_SNAKE_CASE=None\t\t\t,\t\t_SCREAMING_SNAKE_CASE=True\t\t\t,\t\t_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5]\t\t\t,\t\t_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5]\t\t\t,\t\t) -> Tuple:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tsize if size is not None else {\"\"\"shortest_edge\"\"\": 18}\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tcrop_size if crop_size is not None else {\"\"\"height\"\"\": 18, \"\"\"width\"\"\": 18}\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tparent\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tbatch_size\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tnum_channels\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\timage_size\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tmin_resolution\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tmax_resolution\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tdo_resize\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tsize\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tdo_center_crop\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tcrop_size\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tdo_normalize\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\timage_mean\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\timage_std\r\n\r\n\r\n def A__\t\t\t\t\t\t( self ) -> Any:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n return {\r\n \"image_mean\": self.image_mean,\r\n \"image_std\": self.image_std,\r\n \"do_normalize\": self.do_normalize,\r\n \"do_resize\": self.do_resize,\r\n \"do_center_crop\": self.do_center_crop,\r\n \"size\": self.size,\r\n \"crop_size\": self.crop_size,\r\n }\r\n\r\n\r\n\r\n\r\n\r\n\r\n@require_torch\r\n@require_vision\r\nclass \t\t\t\ta_\t\t\t\t\t\t(\t\t\tlowerCamelCase\t, unittest.TestCase\t\t\t\t):\r\n lowercase \t\t= LevitImageProcessor if is_vision_available() else None\r\n\r\n def A__\t\t\t\t\t\t( self ) -> str:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n 
UpperCamelCase\t\t\t\t =\t\t\t\t\tLevitImageProcessingTester(self )\r\n\r\n @property\r\n def A__\t\t\t\t\t\t( self ) -> Union[str, Any]:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n return self.image_processor_tester.prepare_image_processor_dict()\r\n\r\n def A__\t\t\t\t\t\t( self ) -> str:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tself.image_processing_class(**self.image_processor_dict )\r\n self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE\t\t\t,\t\t\"\"\"image_mean\"\"\" ) )\r\n self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE\t\t\t,\t\t\"\"\"image_std\"\"\" ) )\r\n self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE\t\t\t,\t\t\"\"\"do_normalize\"\"\" ) )\r\n self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE\t\t\t,\t\t\"\"\"do_resize\"\"\" ) )\r\n self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE\t\t\t,\t\t\"\"\"do_center_crop\"\"\" ) )\r\n self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE\t\t\t,\t\t\"\"\"size\"\"\" ) )\r\n\r\n def A__\t\t\t\t\t\t( self ) -> Tuple:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tself.image_processing_class.from_dict(self.image_processor_dict )\r\n self.assertEqual(image_processor.size\t\t\t,\t\t{\"\"\"shortest_edge\"\"\": 18} )\r\n self.assertEqual(image_processor.crop_size\t\t\t,\t\t{\"\"\"height\"\"\": 18, \"\"\"width\"\"\": 18} )\r\n\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tself.image_processing_class.from_dict(self.image_processor_dict\t\t\t,\t\tsize=42\t\t\t,\t\tcrop_size=84 )\r\n self.assertEqual(image_processor.size\t\t\t,\t\t{\"\"\"shortest_edge\"\"\": 42} )\r\n self.assertEqual(image_processor.crop_size\t\t\t,\t\t{\"\"\"height\"\"\": 84, \"\"\"width\"\"\": 84} )\r\n\r\n def A__\t\t\t\t\t\t( self ) -> Tuple:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n pass\r\n\r\n def A__\t\t\t\t\t\t( self ) -> List[str]:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tself.image_processing_class(**self.image_processor_dict )\r\n # create random PIL images\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tprepare_image_inputs(self.image_processor_tester\t\t\t,\t\tequal_resolution=_SCREAMING_SNAKE_CASE )\r\n for image in image_inputs:\r\n self.assertIsInstance(_SCREAMING_SNAKE_CASE\t\t\t,\t\tImage.Image )\r\n\r\n # Test not batched input\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\timage_processing(image_inputs[0]\t\t\t,\t\treturn_tensors=\"\"\"pt\"\"\" ).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t,\t\t(\r\n 1,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.crop_size[\"\"\"height\"\"\"],\r\n self.image_processor_tester.crop_size[\"\"\"width\"\"\"],\r\n )\t\t\t,\t\t)\r\n\r\n # Test batched\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\timage_processing(_SCREAMING_SNAKE_CASE\t\t\t,\t\treturn_tensors=\"\"\"pt\"\"\" ).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t,\t\t(\r\n self.image_processor_tester.batch_size,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.crop_size[\"\"\"height\"\"\"],\r\n self.image_processor_tester.crop_size[\"\"\"width\"\"\"],\r\n )\t\t\t,\t\t)\r\n\r\n def A__\t\t\t\t\t\t( self ) -> Optional[Any]:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tself.image_processing_class(**self.image_processor_dict )\r\n # create random numpy tensors\r\n UpperCamelCase\t\t\t\t 
=\t\t\t\t\tprepare_image_inputs(self.image_processor_tester\t\t\t,\t\tequal_resolution=_SCREAMING_SNAKE_CASE\t\t\t,\t\tnumpify=_SCREAMING_SNAKE_CASE )\r\n for image in image_inputs:\r\n self.assertIsInstance(_SCREAMING_SNAKE_CASE\t\t\t,\t\tnp.ndarray )\r\n\r\n # Test not batched input\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\timage_processing(image_inputs[0]\t\t\t,\t\treturn_tensors=\"\"\"pt\"\"\" ).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t,\t\t(\r\n 1,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.crop_size[\"\"\"height\"\"\"],\r\n self.image_processor_tester.crop_size[\"\"\"width\"\"\"],\r\n )\t\t\t,\t\t)\r\n\r\n # Test batched\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\timage_processing(_SCREAMING_SNAKE_CASE\t\t\t,\t\treturn_tensors=\"\"\"pt\"\"\" ).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t,\t\t(\r\n self.image_processor_tester.batch_size,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.crop_size[\"\"\"height\"\"\"],\r\n self.image_processor_tester.crop_size[\"\"\"width\"\"\"],\r\n )\t\t\t,\t\t)\r\n\r\n\r\n def A__\t\t\t\t\t\t( self ) -> Tuple:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tself.image_processing_class(**self.image_processor_dict )\r\n # create random PyTorch tensors\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tprepare_image_inputs(self.image_processor_tester\t\t\t,\t\tequal_resolution=_SCREAMING_SNAKE_CASE\t\t\t,\t\ttorchify=_SCREAMING_SNAKE_CASE )\r\n for image in image_inputs:\r\n self.assertIsInstance(_SCREAMING_SNAKE_CASE\t\t\t,\t\ttorch.Tensor )\r\n\r\n # Test not batched input\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\timage_processing(image_inputs[0]\t\t\t,\t\treturn_tensors=\"\"\"pt\"\"\" ).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t,\t\t(\r\n 1,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.crop_size[\"\"\"height\"\"\"],\r\n self.image_processor_tester.crop_size[\"\"\"width\"\"\"],\r\n )\t\t\t,\t\t)\r\n\r\n # Test batched\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\timage_processing(_SCREAMING_SNAKE_CASE\t\t\t,\t\treturn_tensors=\"\"\"pt\"\"\" ).pixel_values\r\n self.assertEqual(\r\n encoded_images.shape\t\t\t,\t\t(\r\n self.image_processor_tester.batch_size,\r\n self.image_processor_tester.num_channels,\r\n self.image_processor_tester.crop_size[\"\"\"height\"\"\"],\r\n self.image_processor_tester.crop_size[\"\"\"width\"\"\"],\r\n )\t\t\t,\t\t)\r\n"},"code_codestyle":{"kind":"number","value":321,"string":"321"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\nimport datasets\r\n\r\nfrom .evaluate import evaluate\r\n\r\n\r\nSCREAMING_SNAKE_CASE__ = '\\\\n@article{hendrycks2021cuad,\\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\\n journal={arXiv preprint arXiv:2103.06268},\\n year={2021}\\n}\\n'\r\n\r\nSCREAMING_SNAKE_CASE__ = '\\nThis metric wrap the official scoring script for version 1 of the Contract\\nUnderstanding Atticus Dataset (CUAD).\\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\\n'\r\n\r\nSCREAMING_SNAKE_CASE__ = '\\nComputes CUAD scores (EM, F1, AUPR, 
Precision@80%Recall, and Precision@90%Recall).\\nArgs:\\n predictions: List of question-answers dictionaries with the following key-values:\\n - \\'id\\': id of the question-answer pair as given in the references (see below)\\n - \\'prediction_text\\': list of possible texts for the answer, as a list of strings\\n depending on a threshold on the confidence probability of each prediction.\\n references: List of question-answers dictionaries with the following key-values:\\n - \\'id\\': id of the question-answer pair (see above),\\n - \\'answers\\': a Dict in the CUAD dataset format\\n {\\n \\'text\\': list of possible texts for the answer, as a list of strings\\n \\'answer_start\\': list of start positions for the answer, as a list of ints\\n }\\n Note that answer_start values are not taken into account to compute the metric.\\nReturns:\\n \\'exact_match\\': Exact match (the normalized answer exactly match the gold answer)\\n \\'f1\\': The F-score of predicted tokens versus the gold answer\\n \\'aupr\\': Area Under the Precision-Recall curve\\n \\'prec_at_80_recall\\': Precision at 80% recall\\n \\'prec_at_90_recall\\': Precision at 90% recall\\nExamples:\\n >>> predictions = [{\\'prediction_text\\': [\\'The seller:\\', \\'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\\'], \\'id\\': \\'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\\'}]\\n >>> references = [{\\'answers\\': {\\'answer_start\\': [143, 49], \\'text\\': [\\'The seller:\\', \\'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\\']}, \\'id\\': \\'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\\'}]\\n >>> cuad_metric = datasets.load_metric(\"cuad\")\\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\\n >>> print(results)\\n {\\'exact_match\\': 100.0, \\'f1\\': 100.0, \\'aupr\\': 0.0, \\'prec_at_80_recall\\': 1.0, \\'prec_at_90_recall\\': 1.0}\\n'\r\n\r\n\r\n\r\n\r\n\r\n\r\n@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION\t, _KWARGS_DESCRIPTION\t\t\t\t)\r\nclass \t\t\t\ta_\t\t\t\t\t\t(\t\t\tdatasets.Metric\t\t\t\t):\r\n\r\n def A__\t\t\t\t\t\t( self ) -> Tuple:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n return datasets.MetricInfo(\r\n description=_DESCRIPTION\t\t\t,\t\tcitation=_CITATION\t\t\t,\t\tinputs_description=_KWARGS_DESCRIPTION\t\t\t,\t\tfeatures=datasets.Features(\r\n {\r\n \"\"\"predictions\"\"\": {\r\n \"\"\"id\"\"\": datasets.Value(\"\"\"string\"\"\" ),\r\n \"\"\"prediction_text\"\"\": datasets.features.Sequence(datasets.Value(\"\"\"string\"\"\" ) ),\r\n },\r\n \"\"\"references\"\"\": {\r\n \"\"\"id\"\"\": datasets.Value(\"\"\"string\"\"\" ),\r\n \"\"\"answers\"\"\": datasets.features.Sequence(\r\n {\r\n \"\"\"text\"\"\": datasets.Value(\"\"\"string\"\"\" ),\r\n \"\"\"answer_start\"\"\": datasets.Value(\"\"\"int32\"\"\" ),\r\n } ),\r\n },\r\n } )\t\t\t,\t\tcodebase_urls=[\"\"\"https://www.atticusprojectai.org/cuad\"\"\"]\t\t\t,\t\treference_urls=[\"\"\"https://www.atticusprojectai.org/cuad\"\"\"]\t\t\t,\t\t)\r\n\r\n\r\n def A__\t\t\t\t\t\t( self\t\t\t,\t\t_SCREAMING_SNAKE_CASE\t\t\t,\t\t_SCREAMING_SNAKE_CASE ) -> Optional[int]:\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\t{prediction[\"\"\"id\"\"\"]: prediction[\"\"\"prediction_text\"\"\"] for prediction in predictions}\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\t[\r\n {\r\n \"\"\"paragraphs\"\"\": [\r\n {\r\n \"\"\"qas\"\"\": [\r\n 
{\r\n \"\"\"answers\"\"\": [{\"\"\"text\"\"\": answer_text} for answer_text in ref[\"\"\"answers\"\"\"][\"\"\"text\"\"\"]],\r\n \"\"\"id\"\"\": ref[\"\"\"id\"\"\"],\r\n }\r\n for ref in references\r\n ]\r\n }\r\n ]\r\n }\r\n ]\r\n UpperCamelCase\t\t\t\t =\t\t\t\t\tevaluate(dataset=_SCREAMING_SNAKE_CASE\t\t\t,\t\tpredictions=_SCREAMING_SNAKE_CASE )\r\n return score\r\n"},"style_context_codestyle":{"kind":"number","value":321,"string":"321"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":795,"cells":{"code":{"kind":"string","value":"\r\r\r\r\"\"\"simple docstring\"\"\"\r\r\r\rimport os\r\r\r\r\r\r\rdef lowerCamelCase () ->\t\t\t\t\t\t\tAny:\r lowercase\t\t\t\t\t\t:List[str] \t\t\t\t\t\t\t=\t\t\tos.path.join(os.path.dirname(a_)\t\t\t,\t\t\t'''num.txt''')\r with open(a_) as file_hand:\r return str(sum(int(a_) for line in file_hand))[:10]\r\r\rif __name__ == \"__main__\":\r print(solution())\r\r"},"code_codestyle":{"kind":"number","value":358,"string":"358"},"style_context":{"kind":"string","value":"\r\r\r\"\"\"simple docstring\"\"\"\r\r\r\rfrom ...configuration_utils import PretrainedConfig\rfrom ...utils import logging\r\r\rUpperCAmelCase \t\t\t\t\t\t= logging.get_logger(__name__)\r\rUpperCAmelCase \t\t\t\t\t\t= {\r '''caidas/swin2sr-classicalsr-x2-64''': (\r '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''\r ),\r}\rclass __magic_name__\t\t\t\t\t\t( __UpperCAmelCase ):\r __A : Tuple \t\t\t\t\t=\t\t\t\t\"swin2sr\"\r\r __A : Dict \t\t\t\t\t=\t\t\t\t{\r \"hidden_size\": \"embed_dim\",\r \"num_attention_heads\": \"num_heads\",\r \"num_hidden_layers\": \"num_layers\",\r }\r\r\r\r\r\r\r def __init__(\t\t\t\tself\t\t\t\t\t: List[str]\t\t\t\t\t, snake_case__\t\t\t\t\t: List[str]=6_4\t\t\t\t\t, snake_case__\t\t\t\t\t: Union[str, Any]=1\t\t\t\t\t, snake_case__\t\t\t\t\t: Tuple=3\t\t\t\t\t, snake_case__\t\t\t\t\t: int=1_8_0\t\t\t\t\t, snake_case__\t\t\t\t\t: Union[str, Any]=[6, 6, 6, 6, 6, 6]\t\t\t\t\t, snake_case__\t\t\t\t\t: List[str]=[6, 6, 6, 6, 6, 6]\t\t\t\t\t, snake_case__\t\t\t\t\t: Tuple=8\t\t\t\t\t, snake_case__\t\t\t\t\t: List[Any]=2.0\t\t\t\t\t, snake_case__\t\t\t\t\t: Any=True\t\t\t\t\t, snake_case__\t\t\t\t\t: Dict=0.0\t\t\t\t\t, snake_case__\t\t\t\t\t: Dict=0.0\t\t\t\t\t, snake_case__\t\t\t\t\t: Dict=0.1\t\t\t\t\t, snake_case__\t\t\t\t\t: Dict=\"gelu\"\t\t\t\t\t, snake_case__\t\t\t\t\t: Optional[int]=False\t\t\t\t\t, snake_case__\t\t\t\t\t: Any=0.02\t\t\t\t\t, snake_case__\t\t\t\t\t: Any=1e-5\t\t\t\t\t, snake_case__\t\t\t\t\t: Optional[int]=2\t\t\t\t\t, snake_case__\t\t\t\t\t: Optional[int]=1.0\t\t\t\t\t, snake_case__\t\t\t\t\t: Optional[Any]=\"1conv\"\t\t\t\t\t, snake_case__\t\t\t\t\t: List[str]=\"pixelshuffle\"\t\t\t\t\t, **snake_case__\t\t\t\t\t: Tuple\t\t\t\t\t, ):\r\r '''simple docstring'''\r\r\r\r\r\r\r super().__init__(**snake_case__\t\t\t)\r\r lowercase\t\t\t\t\t\t:Dict \t\t\t\t\t\t\t=\t\t\timage_size\r lowercase\t\t\t\t\t\t:List[str] \t\t\t\t\t\t\t=\t\t\tpatch_size\r lowercase\t\t\t\t\t\t:Tuple \t\t\t\t\t\t\t=\t\t\tnum_channels\r lowercase\t\t\t\t\t\t:int \t\t\t\t\t\t\t=\t\t\tembed_dim\r lowercase\t\t\t\t\t\t:Any \t\t\t\t\t\t\t=\t\t\tdepths\r lowercase\t\t\t\t\t\t:Union[str, Any] \t\t\t\t\t\t\t=\t\t\tlen(snake_case__\t\t\t)\r lowercase\t\t\t\t\t\t:List[str] \t\t\t\t\t\t\t=\t\t\tnum_heads\r lowercase\t\t\t\t\t\t:int \t\t\t\t\t\t\t=\t\t\twindow_size\r lowercase\t\t\t\t\t\t:Tuple \t\t\t\t\t\t\t=\t\t\tmlp_ratio\r lowercase\t\t\t\t\t\t:List[Any] \t\t\t\t\t\t\t=\t\t\tqkv_bias\r lowercase\t\t\t\t\t\t:Optional[int] 
\t\t\t\t\t\t\t=\t\t\thidden_dropout_prob\r lowercase\t\t\t\t\t\t:Tuple \t\t\t\t\t\t\t=\t\t\tattention_probs_dropout_prob\r lowercase\t\t\t\t\t\t:Tuple \t\t\t\t\t\t\t=\t\t\tdrop_path_rate\r lowercase\t\t\t\t\t\t:Optional[Any] \t\t\t\t\t\t\t=\t\t\thidden_act\r lowercase\t\t\t\t\t\t:Union[str, Any] \t\t\t\t\t\t\t=\t\t\tuse_absolute_embeddings\r lowercase\t\t\t\t\t\t:Dict \t\t\t\t\t\t\t=\t\t\tlayer_norm_eps\r lowercase\t\t\t\t\t\t:Optional[Any] \t\t\t\t\t\t\t=\t\t\tinitializer_range\r lowercase\t\t\t\t\t\t:Optional[Any] \t\t\t\t\t\t\t=\t\t\tupscale\r lowercase\t\t\t\t\t\t:Any \t\t\t\t\t\t\t=\t\t\timg_range\r lowercase\t\t\t\t\t\t:Optional[int] \t\t\t\t\t\t\t=\t\t\tresi_connection\r lowercase\t\t\t\t\t\t:Union[str, Any] \t\t\t\t\t\t\t=\t\t\tupsampler\r\r"},"style_context_codestyle":{"kind":"number","value":172,"string":"172"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":796,"cells":{"code":{"kind":"string","value":"\r\nfrom dataclasses import dataclass, field\r\nfrom typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union\r\n\r\nimport pyarrow as pa\r\n\r\n\r\nif TYPE_CHECKING:\r\n from .features import FeatureType\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass UpperCAmelCase_\t\t\t:\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n a__ =\t\t42\r\n a__ =\t\tNone\r\n # Automatically constructed\r\n a__ =\t\t\"dict\"\r\n a__ =\t\tNone\r\n a__ =\t\tfield(default=\"\"\"Translation\"\"\" , init=_A , repr=_A )\r\n def __call__( self\t\t\t: Tuple )\t\t\t->\tOptional[int]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\t\t\t_lowercase\t\t\t\t\t\t\t( self\t\t\t: Union[str, Any] )\t\t\t->\tUnion[\"FeatureType\", Dict[str, \"FeatureType\"]]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n from .features import Value\r\n\r\n return {k: Value(\"\"\"string\"\"\" ) for k in sorted(self.languages )}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass UpperCAmelCase_\t\t\t:\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n a__ =\t\tNone\r\n a__ =\t\tNone\r\n a__ =\t\tNone\r\n # Automatically constructed\r\n a__ =\t\t\"dict\"\r\n a__ =\t\tNone\r\n a__ =\t\tfield(default=\"\"\"TranslationVariableLanguages\"\"\" , init=_A , repr=_A )\r\n def \t\t\t\t\t\t\t_lowercase\t\t\t\t\t\t\t( self\t\t\t: Optional[Any] )\t\t\t->\tTuple:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n __magic_name__\t\t\t\t = sorted(set(self.languages ) ) if self.languages else None\r\n __magic_name__\t\t\t\t = len(self.languages ) if self.languages else None\r\n def __call__( self\t\t\t: Any )\t\t\t->\tOptional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n return pa.struct({\"\"\"language\"\"\": pa.list_(pa.string() ), \"\"\"translation\"\"\": pa.list_(pa.string() )} )\r\n def \t\t\t\t\t\t\t_lowercase\t\t\t\t\t\t\t( self\t\t\t: str\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: List[Any] )\t\t\t->\tOptional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n __magic_name__\t\t\t\t = set(self.languages )\r\n if self.languages and set(UpperCamelCase__ ) - lang_set:\r\n raise ValueError(\r\n F'''Some languages in example ({', '.join(sorted(set(UpperCamelCase__ ) - lang_set ) )}) are not in valid set ({', '.join(UpperCamelCase__ )}).''' )\r\n\r\n # Convert dictionary into 
tuples, splitting out cases where there are\r\n # multiple translations for a single language.\r\n __magic_name__\t\t\t\t = []\r\n for lang, text in translation_dict.items():\r\n if isinstance(UpperCamelCase__\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__ ):\r\n translation_tuples.append((lang, text) )\r\n else:\r\n translation_tuples.extend([(lang, el) for el in text] )\r\n\r\n # Ensure translations are in ascending order by language code.\r\n __magic_name__ , __magic_name__\t\t\t\t = zip(*sorted(UpperCamelCase__ ) )\r\n\r\n return {\"language\": languages, \"translation\": translations}\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\t\t\t_lowercase\t\t\t\t\t\t\t( self\t\t\t: Union[str, Any] )\t\t\t->\tUnion[\"FeatureType\", Dict[str, \"FeatureType\"]]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n from .features import Sequence, Value\r\n\r\n return {\r\n \"language\": Sequence(Value(\"\"\"string\"\"\" ) ),\r\n \"translation\": Sequence(Value(\"\"\"string\"\"\" ) ),\r\n }\r\n\r\n"},"code_codestyle":{"kind":"number","value":88,"string":"88"},"style_context":{"kind":"string","value":"\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...utils import logging\r\n\r\n\r\n__lowerCAmelCase\t\t\t: Union[str, Any] = logging.get_logger(__name__)\r\n\r\n__lowerCAmelCase\t\t\t: Tuple = {\r\n 'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',\r\n # See all PEGASUS models at https://huggingface.co/models?filter=pegasus\r\n}\r\n\r\n\r\n\r\n\r\nclass UpperCAmelCase_\t\t\t( _A ):\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n a__ =\t\t\"\"\"pegasus\"\"\"\r\n a__ =\t\t[\"\"\"past_key_values\"\"\"]\r\n a__ =\t\t{\"\"\"num_attention_heads\"\"\": \"\"\"encoder_attention_heads\"\"\", \"\"\"hidden_size\"\"\": \"\"\"d_model\"\"\"}\r\n def __init__( self\t\t\t: Optional[int]\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Optional[int]=5_0265\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Optional[int]=1024\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Any=12\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Union[str, Any]=4096\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Any=16\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Union[str, Any]=12\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: List[str]=4096\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Tuple=16\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Optional[int]=0.0\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: List[Any]=0.0\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: List[str]=True\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: List[Any]=True\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: List[Any]=\"gelu\"\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: List[Any]=1024\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Optional[Any]=0.1\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: str=0.0\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Any=0.0\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Union[str, Any]=0.02\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Any=0\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: int=False\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Any=0\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: List[str]=1\t\t\t,\t\t\t\t\t\t\tUpperCamelCase__\t\t\t: Tuple=1\t\t\t,\t\t\t\t\t\t\t**UpperCamelCase__\t\t\t: Union[str, Any]\t\t\t,\t\t\t\t\t\t\t)\t\t\t->\tstr:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n __magic_name__\t\t\t\t = vocab_size\r\n __magic_name__\t\t\t\t = max_position_embeddings\r\n __magic_name__\t\t\t\t = d_model\r\n __magic_name__\t\t\t\t = 
encoder_ffn_dim\r\n __magic_name__\t\t\t\t = encoder_layers\r\n __magic_name__\t\t\t\t = encoder_attention_heads\r\n __magic_name__\t\t\t\t = decoder_ffn_dim\r\n __magic_name__\t\t\t\t = decoder_layers\r\n __magic_name__\t\t\t\t = decoder_attention_heads\r\n __magic_name__\t\t\t\t = dropout\r\n __magic_name__\t\t\t\t = attention_dropout\r\n __magic_name__\t\t\t\t = activation_dropout\r\n __magic_name__\t\t\t\t = activation_function\r\n __magic_name__\t\t\t\t = init_std\r\n __magic_name__\t\t\t\t = encoder_layerdrop\r\n __magic_name__\t\t\t\t = decoder_layerdrop\r\n __magic_name__\t\t\t\t = use_cache\r\n __magic_name__\t\t\t\t = encoder_layers\r\n __magic_name__\t\t\t\t = scale_embedding # scale factor will be sqrt(d_model) if True\r\n super().__init__(\r\n pad_token_id=UpperCamelCase__\t\t\t,\t\t\t\t\t\t\teos_token_id=UpperCamelCase__\t\t\t,\t\t\t\t\t\t\tis_encoder_decoder=UpperCamelCase__\t\t\t,\t\t\t\t\t\t\tdecoder_start_token_id=UpperCamelCase__\t\t\t,\t\t\t\t\t\t\tforced_eos_token_id=UpperCamelCase__\t\t\t,\t\t\t\t\t\t\t**UpperCamelCase__\t\t\t,\t\t\t\t\t\t\t)\r\n @property\r\n def \t\t\t\t\t\t\t_lowercase\t\t\t\t\t\t\t( self\t\t\t: List[Any] )\t\t\t->\tint:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n return self.encoder_attention_heads\r\n\r\n\r\n\r\n\r\n\r\n @property\r\n def \t\t\t\t\t\t\t_lowercase\t\t\t\t\t\t\t( self\t\t\t: Dict )\t\t\t->\tint:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n return self.d_model\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":88,"string":"88"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":797,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\nimport math\r\n\r\ndef _SCREAMING_SNAKE_CASE (\t\t\t\t\t\t\tlowercase :\tint\t\t\t\t\t):\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n lowerCamelCase_\t\t\t\t\t\t\t= []\r\n lowerCamelCase_\t\t\t\t\t\t\t= 2\r\n lowerCamelCase_\t\t\t\t\t\t\t= int(math.sqrt(lowercase\t\t\t\t\t)\t\t\t\t\t) # Size of every segment\r\n lowerCamelCase_\t\t\t\t\t\t\t= [True] * (end + 1)\r\n lowerCamelCase_\t\t\t\t\t\t\t= []\r\n\r\n while start <= end:\r\n if temp[start] is True:\r\n in_prime.append(lowercase\t\t\t\t\t)\r\n for i in range(start * start ,\t\t\t\t\t\t\tend + 1 ,\t\t\t\t\t\t\tlowercase\t\t\t\t\t):\r\n lowerCamelCase_\t\t\t\t\t\t\t= False\r\n start += 1\r\n prime += in_prime\r\n\r\n lowerCamelCase_\t\t\t\t\t\t\t= end + 1\r\n lowerCamelCase_\t\t\t\t\t\t\t= min(2 * end ,\t\t\t\t\t\t\tlowercase\t\t\t\t\t)\r\n\r\n while low <= n:\r\n lowerCamelCase_\t\t\t\t\t\t\t= [True] * (high - low + 1)\r\n for each in in_prime:\r\n lowerCamelCase_\t\t\t\t\t\t\t= math.floor(low / each\t\t\t\t\t) * each\r\n if t < low:\r\n t += each\r\n\r\n for j in range(lowercase ,\t\t\t\t\t\t\thigh + 1 ,\t\t\t\t\t\t\tlowercase\t\t\t\t\t):\r\n lowerCamelCase_\t\t\t\t\t\t\t= False\r\n\r\n for j in range(len(lowercase\t\t\t\t\t)\t\t\t\t\t):\r\n if temp[j] is True:\r\n prime.append(j + low\t\t\t\t\t)\r\n\r\n lowerCamelCase_\t\t\t\t\t\t\t= high + 1\r\n lowerCamelCase_\t\t\t\t\t\t\t= min(high + end ,\t\t\t\t\t\t\tlowercase\t\t\t\t\t)\r\n\r\n return prime\r\n\r\n\r\n\r\n\r\n\r\n\r\nprint(sieve(10**6))\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":208,"string":"208"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\nfrom ..utils import is_flax_available, is_torch_available\r\n\r\n\r\nif is_torch_available():\r\n from .autoencoder_kl import AutoencoderKL\r\n from .controlnet import ControlNetModel\r\n from 
.dual_transformer_ad import DualTransformeraDModel\r\n from .modeling_utils import ModelMixin\r\n from .prior_transformer import PriorTransformer\r\n from .ta_film_transformer import TaFilmDecoder\r\n from .transformer_ad import TransformeraDModel\r\n from .unet_ad import UNetaDModel\r\n from .unet_ad import UNetaDModel\r\n from .unet_ad_condition import UNetaDConditionModel\r\n from .unet_ad_condition import UNetaDConditionModel\r\n from .vq_model import VQModel\r\n\r\nif is_flax_available():\r\n from .controlnet_flax import FlaxControlNetModel\r\n from .unet_ad_condition_flax import FlaxUNetaDConditionModel\r\n from .vae_flax import FlaxAutoencoderKL\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":208,"string":"208"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":798,"cells":{"code":{"kind":"string","value":"import re\r\nfrom pathlib import Path\r\nfrom unittest import TestCase\r\n\r\nimport pytest\r\n\r\n\r\n\r\n\r\n\r\n@pytest.mark.integration\r\nclass __snake_case (\t\t\t_lowerCamelCase\t):\r\n\r\n\r\n\r\n\r\n\t\t\t\t\tdef \t\t\t__a\t\t\t( self , __UpperCamelCase\t\t) -> List[str]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\twith open(__UpperCamelCase , encoding='utf-8'\t\t) as input_file:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t= re.compile(r'(?!.*\\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\\b)(?<=\\s)(open)\\((.*)\\)'\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tList[Any]\t\t\t\t\t\t\t\t\t\t\t\t= input_file.read()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t= regexp.search(__UpperCamelCase\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\treturn match\r\n\r\n\r\n\r\n\r\n\t\t\t\t\tdef \t\t\t__a\t\t\t( self , __UpperCamelCase\t\t) -> int:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\twith open(__UpperCamelCase , encoding='utf-8'\t\t) as input_file:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tAny\t\t\t\t\t\t\t\t\t\t\t\t= re.compile(r'#[^\\r\\n]*print\\(|\\\"[^\\r\\n]*print\\(|\\\"\\\"\\\".*?print\\(.*?\\\"\\\"\\\"|(print\\()' , re.DOTALL\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tAny\t\t\t\t\t\t\t\t\t\t\t\t= input_file.read()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t= regexp.finditer(__UpperCamelCase\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tList[str]\t\t\t\t\t\t\t\t\t\t\t\t= [match for match in matches if match is not None and match.group(1\t\t) is not None]\r\n\t\t\t\t\t\t\t\t\treturn matches[0] if matches else None\r\n\r\n\r\n\r\n\r\n\t\t\t\t\tdef \t\t\t__a\t\t\t( self\t\t) -> Optional[int]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t= Path('./datasets'\t\t)\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tTuple\t\t\t\t\t\t\t\t\t\t\t\t= list(dataset_paths.absolute().glob('**/*.py'\t\t)\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\tfor dataset in dataset_files:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tif self._no_encoding_on_file_open(str(__UpperCamelCase\t\t)\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise AssertionError(F\"\"\"open(...) 
must use utf-8 encoding in {dataset}\"\"\"\t\t)\r\n\r\n\r\n\r\n\r\n\t\t\t\t\tdef \t\t\t__a\t\t\t( self\t\t) -> Union[str, Any]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tList[Any]\t\t\t\t\t\t\t\t\t\t\t\t= Path('./datasets'\t\t)\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tAny\t\t\t\t\t\t\t\t\t\t\t\t= list(dataset_paths.absolute().glob('**/*.py'\t\t)\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\tfor dataset in dataset_files:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tif self._no_print_statements(str(__UpperCamelCase\t\t)\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise AssertionError(F\"\"\"print statement found in {dataset}. Use datasets.logger/logging instead.\"\"\"\t\t)\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":143,"string":"143"},"style_context":{"kind":"string","value":"import json\r\nimport os\r\nimport subprocess\r\nimport unittest\r\nfrom ast import literal_eval\r\n\r\nimport pytest\r\nfrom parameterized import parameterized, parameterized_class\r\n\r\nfrom . import is_sagemaker_available\r\n\r\n\r\nif is_sagemaker_available():\r\n\t\t\t\tfrom sagemaker import Session, TrainingJobAnalytics\r\n\t\t\t\tfrom sagemaker.huggingface import HuggingFace\r\n\r\n\r\n\r\n\r\n\r\n@pytest.mark.skipif(\r\n literal_eval(os.getenv(\"\"\"TEST_SAGEMAKER\"\"\" ,\"\"\"False\"\"\"\t)\t) is not True ,reason=\"\"\"Skipping test because should only be run when releasing minor transformers version\"\"\" ,)\r\n@pytest.mark.usefixtures(\"\"\"sm_env\"\"\"\t)\r\n@parameterized_class(\r\n [\r\n {\r\n \"\"\"framework\"\"\": \"\"\"pytorch\"\"\",\r\n \"\"\"script\"\"\": \"\"\"run_glue_model_parallelism.py\"\"\",\r\n \"\"\"model_name_or_path\"\"\": \"\"\"roberta-large\"\"\",\r\n \"\"\"instance_type\"\"\": \"\"\"ml.p3dn.24xlarge\"\"\",\r\n \"\"\"results\"\"\": {\"\"\"train_runtime\"\"\": 1600, \"\"\"eval_accuracy\"\"\": 0.3, \"\"\"eval_loss\"\"\": 1.2},\r\n },\r\n {\r\n \"\"\"framework\"\"\": \"\"\"pytorch\"\"\",\r\n \"\"\"script\"\"\": \"\"\"run_glue.py\"\"\",\r\n \"\"\"model_name_or_path\"\"\": \"\"\"roberta-large\"\"\",\r\n \"\"\"instance_type\"\"\": \"\"\"ml.p3dn.24xlarge\"\"\",\r\n \"\"\"results\"\"\": {\"\"\"train_runtime\"\"\": 1600, \"\"\"eval_accuracy\"\"\": 0.3, \"\"\"eval_loss\"\"\": 1.2},\r\n },\r\n ]\t)\r\nclass __snake_case (\t\t\tunittest.TestCase\t):\r\n\r\n\r\n\r\n\r\n\t\t\t\t\tdef \t\t\t__a\t\t\t( self\t\t) -> Tuple:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\tif self.framework == \"pytorch\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tsubprocess.run(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t F\"\"\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\"\"\".split() , encoding='utf-8' , check=__UpperCamelCase , )\r\n\t\t\t\t\t\t\t\t\tassert hasattr(self , 'env'\t\t)\r\n\r\n\r\n\r\n\r\n\t\t\t\t\tdef \t\t\t__a\t\t\t( self , __UpperCamelCase\t\t) -> Optional[int]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tTuple\t\t\t\t\t\t\t\t\t\t\t\t= {\r\n\t\t\t\t\t\t\t\t\t 'enabled': True,\r\n\t\t\t\t\t\t\t\t\t 'processes_per_host': 8,\r\n\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tAny\t\t\t\t\t\t\t\t\t\t\t\t= {\r\n\t\t\t\t\t\t\t\t\t 'enabled': True,\r\n\t\t\t\t\t\t\t\t\t 'parameters': {\r\n\t\t\t\t\t\t\t\t\t 'microbatches': 4,\r\n\t\t\t\t\t\t\t\t\t 'placement_strategy': 'spread',\r\n\t\t\t\t\t\t\t\t\t 'pipeline': 'interleaved',\r\n\t\t\t\t\t\t\t\t\t 'optimize': 
'speed',\r\n\t\t\t\t\t\t\t\t\t 'partitions': 4,\r\n\t\t\t\t\t\t\t\t\t 'ddp': True,\r\n\t\t\t\t\t\t\t\t\t },\r\n\t\t\t\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tOptional[int]\t\t\t\t\t\t\t\t\t\t\t\t= {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}\r\n\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tint\t\t\t\t\t\t\t\t\t\t\t\t= 'trainer' if self.script == 'run_glue.py' else 'smtrainer'\r\n\t\t\t\t\t\t\t\t\t# creates estimator\r\n\t\t\t\t\t\t\t\t\treturn HuggingFace(\r\n\t\t\t\t\t\t\t\t\t entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F\"\"\"{self.env.base_job_name}-{instance_count}-smp-{name_extension}\"\"\" , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={\r\n\t\t\t\t\t\t\t\t\t **self.env.hyperparameters,\r\n\t\t\t\t\t\t\t\t\t 'model_name_or_path': self.model_name_or_path,\r\n\t\t\t\t\t\t\t\t\t 'max_steps': 500,\r\n\t\t\t\t\t\t\t\t\t } , metric_definitions=self.env.metric_definitions , distribution=__UpperCamelCase , py_version='py36' , )\r\n\r\n\r\n\r\n\r\n\t\t\t\t\tdef \t\t\t__a\t\t\t( self , __UpperCamelCase\t\t) -> List[Any]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\tTrainingJobAnalytics(__UpperCamelCase\t\t).export_csv(F\"\"\"{self.env.test_path}/{job_name}_metrics.csv\"\"\"\t\t)\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t@parameterized.expand([(1,)]\t\t)\r\n\t\t\t\t\tdef \t\t\t__a\t\t\t( self , __UpperCamelCase\t\t) -> List[Any]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tstr\t\t\t\t\t\t\t\t\t\t\t\t= self.create_estimator(__UpperCamelCase\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# run training\r\n\t\t\t\t\t\t\t\t\testimator.fit()\r\n\r\n\t\t\t\t\t\t\t\t\t# result dataframe\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tDict\t\t\t\t\t\t\t\t\t\t\t\t= TrainingJobAnalytics(estimator.latest_training_job.name\t\t).dataframe()\r\n\r\n\t\t\t\t\t\t\t\t\t# extract kpis\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tAny\t\t\t\t\t\t\t\t\t\t\t\t= list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value']\t\t)\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tList[str]\t\t\t\t\t\t\t\t\t\t\t\t= list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value']\t\t)\r\n\t\t\t\t\t\t\t\t\t# get train time from SageMaker job, this includes starting, preprocessing, stopping\r\n\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t:\tAny\t\t\t\t\t\t\t\t\t\t\t\t= (\r\n\t\t\t\t\t\t\t\t\t Session().describe_training_job(estimator.latest_training_job.name\t\t).get('TrainingTimeInSeconds' , 999999\t\t)\r\n\t\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# assert kpis\r\n\t\t\t\t\t\t\t\t\tassert train_runtime <= self.results[\"train_runtime\"]\r\n\t\t\t\t\t\t\t\t\tassert all(t >= self.results['eval_accuracy'] for t in eval_accuracy\t\t)\r\n\t\t\t\t\t\t\t\t\tassert all(t <= self.results['eval_loss'] for t in eval_loss\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# dump tests result into json file to share in PR\r\n\t\t\t\t\t\t\t\t\twith open(F\"\"\"{estimator.latest_training_job.name}.json\"\"\" , 'w'\t\t) as outfile:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tjson.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , 
__UpperCamelCase\t\t)\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":143,"string":"143"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":799,"cells":{"code":{"kind":"string","value":"\n\n\n\nfrom typing import TYPE_CHECKING\n\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available\n\n\n__snake_case\t\t\t\t\t\t:Optional[Any]\t\t\t\t\t\t=\t\t\t\t\t{\n '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],\n}\n\ntry:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n __snake_case\t\t\t\t\t\t:Union[str, Any]\t\t\t\t\t\t=\t\t\t\t\t[\n '''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',\n '''ErnieForCausalLM''',\n '''ErnieForMaskedLM''',\n '''ErnieForMultipleChoice''',\n '''ErnieForNextSentencePrediction''',\n '''ErnieForPreTraining''',\n '''ErnieForQuestionAnswering''',\n '''ErnieForSequenceClassification''',\n '''ErnieForTokenClassification''',\n '''ErnieModel''',\n '''ErniePreTrainedModel''',\n ]\n\nif TYPE_CHECKING:\n from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig\n\n try:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .modeling_ernie import (\n ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,\n ErnieForCausalLM,\n ErnieForMaskedLM,\n ErnieForMultipleChoice,\n ErnieForNextSentencePrediction,\n ErnieForPreTraining,\n ErnieForQuestionAnswering,\n ErnieForSequenceClassification,\n ErnieForTokenClassification,\n ErnieModel,\n ErniePreTrainedModel,\n )\n\nelse:\n import sys\n\n __snake_case\t\t\t\t\t\t:List[Any]\t\t\t\t\t\t=\t\t\t\t\t_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)\n\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":131,"string":"131"},"style_context":{"kind":"string","value":"\n\n\n\nfrom typing import TYPE_CHECKING\n\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available\n\n\n__snake_case\t\t\t\t\t\t:Tuple\t\t\t\t\t\t=\t\t\t\t\t{\n '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],\n}\ntry:\n if not is_tokenizers_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n __snake_case\t\t\t\t\t\t:List[Any]\t\t\t\t\t\t=\t\t\t\t\t['''BloomTokenizerFast''']\n\ntry:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n __snake_case\t\t\t\t\t\t:Any\t\t\t\t\t\t=\t\t\t\t\t[\n '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',\n '''BloomForCausalLM''',\n '''BloomModel''',\n '''BloomPreTrainedModel''',\n '''BloomForSequenceClassification''',\n '''BloomForTokenClassification''',\n '''BloomForQuestionAnswering''',\n ]\n\nif TYPE_CHECKING:\n from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig\n\n try:\n if not is_tokenizers_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .tokenization_bloom_fast import BloomTokenizerFast\n\n try:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .modeling_bloom import (\n BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,\n BloomForCausalLM,\n 
BloomForQuestionAnswering,\n BloomForSequenceClassification,\n BloomForTokenClassification,\n BloomModel,\n BloomPreTrainedModel,\n )\n\nelse:\n import sys\n\n __snake_case\t\t\t\t\t\t:Any\t\t\t\t\t\t=\t\t\t\t\t_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)\n\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":131,"string":"131"},"label":{"kind":"number","value":1,"string":"1"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":7,"numItemsPerPage":100,"numTotalItems":153992,"offset":700,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NTczODg3MSwic3ViIjoiL2RhdGFzZXRzL2luZmluaXR5b2ZzcGFjZS9weXRob25fY29kZXN0eWxlcy1taXhlZDEtNTAwIiwiZXhwIjoxNzU1NzQyNDcxLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.4BFs6Z0ZU4rhEdM07OjBZYmhklTbK5hb_ZwVbx3JPK_lCN9oyjeylsNKp59OG_PHO72-VDTWnj4IWmCLVjpqCA","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Dataset schema (column summary as shown by the dataset viewer; string columns report min–max length, integer columns report min–max value):

    column                    type     min   max
    code                      string   86    54.5k   (characters)
    code_codestyle            int64    0     371
    style_context             string   87    49.2k   (characters)
    style_context_codestyle   int64    0     349
    label                     int64    0     1
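For reference, a minimal sketch of loading and inspecting rows with this schema via the `datasets` library; the dataset identifier and split name below are placeholders, not taken from this page.

# Minimal inspection sketch; "<namespace>/<dataset-name>" and the split name
# are placeholders/assumptions.
from datasets import load_dataset

ds = load_dataset("<namespace>/<dataset-name>", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the styled code sample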
import types
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor with values in [0, 1]; returns a bit tensor in [-1, 1]."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits_tensor = ((x & mask) != 0).float()
    bits_tensor = rearrange(bits_tensor, "b c d h w -> b (c d) h w")
    return bits_tensor * 2 - 1


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor with values in [-1, 1]; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=bits)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clips the predicted sample to the [-bit_scale, bit_scale] range."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of the DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Notation (<variable name> -> <name in paper>):
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, "predicted x_0" of formula (12)
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. clip "predicted x_0" to the bit range instead of [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance "sigma_t(eta)", formula (16)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12)
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t-1 without "random noise" of formula (12)
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator: https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        prev_sample = prev_sample + self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clips the predicted sample to the [-bit_scale, bit_scale] range."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample, "predicted x_0" of formula (15)
    #    of the DDPM paper https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. clip "predicted x_0" to the bit range instead of [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. compute coefficients for pred_original_sample x_0 and current sample x_t, formula (7)
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. compute predicted previous sample mu_t, formula (7)
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Bind the bit-aware step function to the scheduler instance; the scheduler
        # also needs `bit_scale`, since the step functions clip with `self.bit_scale`.
        scheduler.bit_scale = bit_scale
        scheduler.step = types.MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step,
            scheduler,
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        # start from random noise quantized to the bit representation
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        # map the bit representation back to an image in [0, 1]
        image = bits_to_decimal(latents)

        if output_type == "pil":
            # numpy_to_pil expects (N, H, W, C) numpy arrays
            image = self.numpy_to_pil(image.cpu().permute(0, 2, 3, 1).numpy())

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
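A minimal, hypothetical usage sketch for the pipeline above; the checkpoint identifier is a placeholder and assumes a repository previously saved with matching `unet` and `scheduler` components.

# Hypothetical usage sketch: "path/to/bit-diffusion-checkpoint" is a placeholder,
# standing in for a repo saved via BitDiffusion.save_pretrained(...).
pipe = BitDiffusion.from_pretrained("path/to/bit-diffusion-checkpoint")
output = pipe(height=64, width=64, num_inference_steps=50)
output.images[0].save("bit_diffusion_sample.png")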
code_codestyle: 30
def permute(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations by recursively rotating the first element out.

    >>> sorted(permute([1, 2, 3]))
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """
    Return all permutations via in-place swapping and backtracking.

    >>> sorted(permute2([1, 2, 3]))
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    """

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # undo the swap

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data produced by permute2
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
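A quick cross-check of both implementations against the standard library (an illustrative addition, not part of the original file):

# Illustrative addition: both implementations must agree with itertools.
from itertools import permutations as it_permutations

expected = {tuple(p) for p in it_permutations([1, 2, 3])}
assert {tuple(p) for p in permute([1, 2, 3])} == expected
assert {tuple(p) for p in permute2([1, 2, 3])} == expected
print(f"both implementations produce all {len(expected)} permutations")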
style_context_codestyle: 240
label: 0
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c,
    calculates the roots for any quadratic equation of the form ax^2 + bx + c.

    >>> quadratic_roots(a=1, b=3, c=-4)
    (1.0, -4.0)
    >>> quadratic_roots(5, 6, 1)
    (-0.2, -1.0)
    >>> quadratic_roots(1, -6, 25)
    ((3+4j), (3-4j))
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
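As a worked check (an addition): for 5x^2 + 6x + 1 the discriminant is 6^2 - 4*5*1 = 16, so the roots are (-6 ± 4)/10, i.e. -0.2 and -1.0; a complex-root case follows the same formula with a negative discriminant.

# Illustrative checks: a real-root case and a complex-root case.
assert quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0)        # discriminant 36 - 20 = 16
assert quadratic_roots(a=1, b=-6, c=25) == (3 + 4j, 3 - 4j)  # discriminant 36 - 100 = -64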
code_codestyle: 350
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
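A slightly hardened variant of the request (an illustrative addition): it bounds the request time and fails loudly on HTTP errors, using only standard `requests` calls.

# Illustrative addition: same endpoint, with a timeout and explicit error check.
def fetch_bbc_news_checked(bbc_news_api_key: str) -> None:
    response = requests.get(_NEWS_API + bbc_news_api_key, timeout=10)
    response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx responses
    for i, article in enumerate(response.json()["articles"], 1):
        print(f"{i}.) {article['title']}")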
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) UpperCAmelCase__ = _symbol_database.Default() UpperCAmelCase__ = _descriptor_pool.Default().AddSerializedFile( B'\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) UpperCAmelCase__ = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: UpperCAmelCase__ = None UpperCAmelCase__ = B'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" UpperCAmelCase__ = 45 UpperCAmelCase__ = 1581 UpperCAmelCase__ = 1517 UpperCAmelCase__ = 1570 UpperCAmelCase__ = 1584 UpperCAmelCase__ = 1793 UpperCAmelCase__ = 1795 UpperCAmelCase__ = 1916 UpperCAmelCase__ = 1864 UpperCAmelCase__ = 1905 UpperCAmelCase__ = 1919 UpperCAmelCase__ = 2429 UpperCAmelCase__ = 2208 UpperCAmelCase__ = 2418 UpperCAmelCase__ = 2323 UpperCAmelCase__ = 2407 # @@protoc_insertion_point(module_scope)
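# --- Editor's usage sketch (not part of the generated protobuf module) ---
# Once this file is importable as `sentencepiece_model_pb2`, a serialized
# SentencePiece model can be inspected like any protobuf message. The model
# path below is a placeholder assumption.
#
#     from sentencepiece_model_pb2 import ModelProto
#
#     m = ModelProto()
#     with open("<path-to>/sentencepiece.bpe.model", "rb") as f:
#         m.ParseFromString(f.read())
#     print(m.trainer_spec.model_type, len(m.pieces))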
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal

logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset."""
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
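# --- Editor's usage sketch (not part of the original module) ---
# Interleaving two small in-memory datasets through the public API:
#
#     from datasets import Dataset, interleave_datasets
#
#     d1 = Dataset.from_dict({"x": [0, 1, 2]})
#     d2 = Dataset.from_dict({"x": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#
# With stopping_strategy="all_exhausted", sampling instead continues
# (oversampling if needed) until every source dataset has been fully consumed.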
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode ``word`` with the Baconian substitution table above."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian cipher text made of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
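# Editor's round-trip example: every letter maps to a fixed five-symbol A/B
# group, so decoding an encoding returns the original lowercase text.
#
#     >>> encode("hello")
#     'AABBBAABAAABABAABABAABBAB'
#     >>> decode(encode("hello"))
#     'hello'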
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression over integers."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
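# Editor's worked example: "2 1 + 3 *" in postfix is (2 + 1) * 3.
#
#     >>> evaluate_postfix(["2", "1", "+", "3", "*"])
#     9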
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a__ = logging.get_logger(__name__) a__ = '▁' a__ = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'} a__ = { 'sentencepiece_model_file': 'sentencepiece.bpe.model', 'vocab_file': 'vocab.txt', } a__ = { 'vocab_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', }, 'sentencepiece_model_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', }, } a__ = { 'ernie-m-base': 5_14, 'ernie-m-large': 5_14, } a__ = { 'ernie-m-base': {'do_lower_case': False}, 'ernie-m-large': {'do_lower_case': False}, } class snake_case ( UpperCamelCase__ ): '''simple docstring''' snake_case_ : Optional[Any] = ["""input_ids"""] snake_case_ : Dict = VOCAB_FILES_NAMES snake_case_ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION snake_case_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ : Dict = PRETRAINED_VOCAB_FILES_MAP snake_case_ : int = RESOURCE_FILES_NAMES def __init__( self : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=None , lowerCAmelCase : Any=False , lowerCAmelCase : List[Any]="utf8" , lowerCAmelCase : List[str]="[UNK]" , lowerCAmelCase : Optional[Any]="[SEP]" , lowerCAmelCase : str="[PAD]" , lowerCAmelCase : Any="[CLS]" , lowerCAmelCase : Tuple="[MASK]" , lowerCAmelCase : Tuple = None , **lowerCAmelCase : Optional[Any] , ) -> None: """simple docstring""" _snake_case : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , vocab_file=lowerCAmelCase , encoding=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) _snake_case : List[Any] = do_lower_case _snake_case : Optional[int] = sentencepiece_model_ckpt _snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(lowerCAmelCase) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: _snake_case : Tuple = self.load_vocab(filepath=lowerCAmelCase) else: _snake_case : List[Any] = {self.sp_model.id_to_piece(lowerCAmelCase): id for id in range(self.sp_model.get_piece_size())} _snake_case : Optional[int] = {v: k for k, v in self.vocab.items()} def UpperCamelCase_ ( self : Optional[int] , lowerCAmelCase : Union[str, Any]) -> Dict: """simple docstring""" if text is None: return None _snake_case : Dict = self.tokenize(lowerCAmelCase) _snake_case : Dict = """""", [] for i, ch in enumerate(lowerCAmelCase): if ch in self.SP_CHAR_MAPPING: _snake_case : List[str] = self.SP_CHAR_MAPPING.get(lowerCAmelCase) else: _snake_case : Tuple = unicodedata.normalize("""NFKC""" , lowerCAmelCase) if self.is_whitespace(lowerCAmelCase): continue normalized_text += ch char_mapping.extend([i] * len(lowerCAmelCase)) _snake_case : List[str] = normalized_text, [], 0 if self.do_lower_case: _snake_case : Dict = text.lower() for token in split_tokens: if token[:1] == "▁": _snake_case : Dict = token[1:] _snake_case : str = text[offset:].index(lowerCAmelCase) 
+ offset _snake_case : Tuple = start + len(lowerCAmelCase) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1)) _snake_case : str = end return token_mapping @property def UpperCamelCase_ ( self : Optional[Any]) -> Tuple: """simple docstring""" return len(self.vocab) def UpperCamelCase_ ( self : List[str]) -> Any: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder) def __getstate__( self : List[str]) -> Union[str, Any]: """simple docstring""" _snake_case : Tuple = self.__dict__.copy() _snake_case : Optional[int] = None return state def __setstate__( self : str , lowerCAmelCase : str) -> Union[str, Any]: """simple docstring""" _snake_case : Any = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _snake_case : Dict = {} _snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.sentencepiece_model_ckpt) def UpperCamelCase_ ( self : Optional[int] , lowerCAmelCase : List[Any]) -> List[str]: """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(lowerCAmelCase , lowerCAmelCase) for c in text)) def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=False , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Any=0.1) -> List[str]: """simple docstring""" if self.sp_model_kwargs.get("""enable_sampling""") is True: _snake_case : Optional[Any] = True if self.sp_model_kwargs.get("""alpha""") is not None: _snake_case : Tuple = self.sp_model_kwargs.get("""alpha""") if self.sp_model_kwargs.get("""nbest_size""") is not None: _snake_case : Optional[Any] = self.sp_model_kwargs.get("""nbest_size""") if not enable_sampling: _snake_case : Any = self.sp_model.EncodeAsPieces(lowerCAmelCase) else: _snake_case : str = self.sp_model.SampleEncodeAsPieces(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase) _snake_case : Dict = [] for pi, piece in enumerate(lowerCAmelCase): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(lowerCAmelCase) and pi != 0: new_pieces.append(lowerCAmelCase) continue else: continue _snake_case : Any = 0 for i, chunk in enumerate(lowerCAmelCase): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(lowerCAmelCase) or self.is_punct(lowerCAmelCase): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) new_pieces.append(lowerCAmelCase) _snake_case : int = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) _snake_case : str = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i]) _snake_case : Tuple = i if len(lowerCAmelCase) > lst_i: new_pieces.append(piece[lst_i:]) return new_pieces def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : List[str]) -> Any: """simple docstring""" _snake_case : Union[str, Any] = """""".join(lowerCAmelCase).replace(lowerCAmelCase , """ """).strip() return out_string def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : str) -> Optional[Any]: """simple docstring""" _snake_case : Optional[Any] = self.convert_ids_to_tokens(lowerCAmelCase) _snake_case : Tuple = """""".join(lowerCAmelCase).replace(lowerCAmelCase , """ """).strip() return out_string def UpperCamelCase_ ( self : Dict , lowerCAmelCase : List[Any]) -> Dict: """simple docstring""" return self.vocab.get(lowerCAmelCase , self.vocab.get(self.unk_token)) def UpperCamelCase_ ( self : Union[str, Any] , 
lowerCAmelCase : int) -> Tuple: """simple docstring""" return self.reverse_vocab.get(lowerCAmelCase , self.unk_token) def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any]=None) -> Dict: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case : List[str] = [self.cls_token_id] _snake_case : str = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any]=None) -> Tuple: """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCamelCase_ ( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=False) -> int: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""") return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase)) + [1, 1] + ([0] * len(lowerCAmelCase)) + [1] return [1] + ([0] * len(lowerCAmelCase)) + [1] def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : int = None) -> List[int]: """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(lowerCAmelCase) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(lowerCAmelCase) + 1) + [1] * (len(lowerCAmelCase) + 3) def UpperCamelCase_ ( self : Any , lowerCAmelCase : Dict) -> Union[str, Any]: """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase_ ( self : str , lowerCAmelCase : Optional[int]) -> str: """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : str) -> List[Any]: """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase_ ( self : str , lowerCAmelCase : Union[str, Any]) -> Optional[int]: """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(lowerCAmelCase) == 1: _snake_case : Dict = unicodedata.category(lowerCAmelCase) if cat == "Zs": return True return False def UpperCamelCase_ ( self : int , lowerCAmelCase : Tuple) -> List[str]: """simple docstring""" _snake_case : Union[str, Any] = {} with io.open(lowerCAmelCase , """r""" , encoding="""utf-8""") as f: for index, line in enumerate(lowerCAmelCase): _snake_case : int = line.rstrip("""\n""") _snake_case : Tuple = int(lowerCAmelCase) return token_to_idx def UpperCamelCase_ ( self : Any , lowerCAmelCase : int , lowerCAmelCase : Optional[int] = None) -> Tuple[str]: """simple docstring""" _snake_case : Union[str, Any] = 0 if os.path.isdir(lowerCAmelCase): _snake_case : Optional[Any] = os.path.join( lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) else: _snake_case : Optional[int] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory with open(lowerCAmelCase , """w""" , encoding="""utf-8""") as writer: for token, token_index in sorted(self.vocab.items() , key=lambda lowerCAmelCase: kv[1]): if index != token_index: 
logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' """ Please check that the vocabulary is not corrupted!""") _snake_case : Any = token_index writer.write(token + """\n""") index += 1 _snake_case : int = os.path.join(lowerCAmelCase , """sentencepiece.bpe.model""") with open(lowerCAmelCase , """wb""") as fi: _snake_case : List[str] = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase) return (vocab_file,)
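# --- Editor's usage sketch (not part of the original tokenizer file) ---
# The class above is the ERNIE-M tokenizer; upstream it is named
# `ErnieMTokenizer`, which this sketch assumes. The checkpoint name is taken
# from the PRETRAINED maps at the top of the file.
#
#     tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
#     ids = tokenizer("a quick test")["input_ids"]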
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class __lowercase (unittest.TestCase ): """simple docstring""" def __init__( self , A , A=7 , A=3 , A=3_0 , A=4_0_0 , A=True , A=None , A=0.9 , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , ) -> Dict: snake_case : Optional[int] = size if size is not None else {"""shortest_edge""": 3_0} snake_case : Optional[int] = crop_size if crop_size is not None else {"""height""": 3_0, """width""": 3_0} snake_case : int = parent snake_case : List[str] = batch_size snake_case : Any = num_channels snake_case : Optional[Any] = min_resolution snake_case : Any = max_resolution snake_case : Dict = do_resize_and_center_crop snake_case : Any = size snake_case : List[Any] = crop_pct snake_case : int = crop_size snake_case : int = do_normalize snake_case : List[Any] = image_mean snake_case : Tuple = image_std def UpperCAmelCase ( self ) -> int: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __lowercase (UpperCamelCase__ , unittest.TestCase ): """simple docstring""" _snake_case = PoolFormerImageProcessor if is_vision_available() else None def UpperCAmelCase ( self ) -> Optional[Any]: snake_case : str = PoolFormerImageProcessingTester(self ) @property def UpperCAmelCase ( self ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self ) -> Dict: snake_case : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A , """do_resize_and_center_crop""" ) ) self.assertTrue(hasattr(A , """size""" ) ) self.assertTrue(hasattr(A , """crop_pct""" ) ) self.assertTrue(hasattr(A , """do_normalize""" ) ) self.assertTrue(hasattr(A , """image_mean""" ) ) self.assertTrue(hasattr(A , """image_std""" ) ) def UpperCAmelCase ( self ) -> int: snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 3_0} ) self.assertEqual(image_processor.crop_size , {"""height""": 3_0, """width""": 3_0} ) snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCAmelCase ( self ) -> Tuple: pass def UpperCAmelCase ( self ) -> List[Any]: # Initialize image_processing snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A , Image.Image ) # Test not batched input snake_case : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], 
self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case : Tuple = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase ( self ) -> Dict: # Initialize image_processing snake_case : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A ) for image in image_inputs: self.assertIsInstance(A , np.ndarray ) # Test not batched input snake_case : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case : Any = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase ( self ) -> List[str]: # Initialize image_processing snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A ) for image in image_inputs: self.assertIsInstance(A , torch.Tensor ) # Test not batched input snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case : int = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: if isinstance(UpperCAmelCase , torch.Tensor ): return image elif isinstance(UpperCAmelCase , PIL.Image.Image ): snake_case_ = [image] if isinstance(image[0] , PIL.Image.Image ): snake_case_ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] snake_case_ = np.concatenate(UpperCAmelCase , axis=0 ) snake_case_ = np.array(UpperCAmelCase ).astype(np.floataa ) / 255.0 snake_case_ = image.transpose(0 , 3 , 1 , 2 ) snake_case_ = 2.0 * image - 1.0 snake_case_ = torch.from_numpy(UpperCAmelCase ) elif isinstance(image[0] , torch.Tensor ): snake_case_ = torch.cat(UpperCAmelCase , dim=0 ) return image def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=0.9_995 ) -> Any: if not isinstance(UpperCAmelCase , np.ndarray ): snake_case_ = True snake_case_ = va.device snake_case_ = va.cpu().numpy() snake_case_ = va.cpu().numpy() snake_case_ = np.sum(va * va / (np.linalg.norm(UpperCAmelCase ) * np.linalg.norm(UpperCAmelCase )) ) if np.abs(UpperCAmelCase ) > DOT_THRESHOLD: snake_case_ = (1 - t) * va + t * va else: snake_case_ = np.arccos(UpperCAmelCase ) snake_case_ = np.sin(UpperCAmelCase ) snake_case_ = theta_a * t snake_case_ = np.sin(UpperCAmelCase ) snake_case_ = np.sin(theta_a - theta_t ) / sin_theta_a snake_case_ = sin_theta_t / sin_theta_a snake_case_ = sa * va + sa * va if inputs_are_torch: snake_case_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase ) return va def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Tuple: snake_case_ = F.normalize(UpperCAmelCase , dim=-1 ) snake_case_ = F.normalize(UpperCAmelCase , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str: for param in model.parameters(): snake_case_ = value class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, ) -> List[Any]: super().__init__() self.register_modules( vae=lowerCAmelCase__, text_encoder=lowerCAmelCase__, clip_model=lowerCAmelCase__, tokenizer=lowerCAmelCase__, unet=lowerCAmelCase__, scheduler=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, coca_model=lowerCAmelCase__, coca_tokenizer=lowerCAmelCase__, coca_transform=lowerCAmelCase__, ) snake_case_ = ( feature_extractor.size if isinstance(feature_extractor.size, lowerCAmelCase__) else feature_extractor.size['shortest_edge'] ) snake_case_ = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) set_requires_grad(self.text_encoder, lowerCAmelCase__) set_requires_grad(self.clip_model, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__ = "auto") -> Optional[Any]: if slice_size == "auto": # half 
the attention head size is usually a good trade-off between # speed and memory snake_case_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase__) def a_ ( self) -> Optional[int]: self.enable_attention_slicing(lowerCAmelCase__) def a_ ( self) -> Tuple: set_requires_grad(self.vae, lowerCAmelCase__) def a_ ( self) -> Optional[Any]: set_requires_grad(self.vae, lowerCAmelCase__) def a_ ( self) -> str: set_requires_grad(self.unet, lowerCAmelCase__) def a_ ( self) -> int: set_requires_grad(self.unet, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Dict: # get the original timestep using init_timestep snake_case_ = min(int(num_inference_steps * strength), lowerCAmelCase__) snake_case_ = max(num_inference_steps - init_timestep, 0) snake_case_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None) -> Any: if not isinstance(lowerCAmelCase__, torch.Tensor): raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase__)}') snake_case_ = image.to(device=lowerCAmelCase__, dtype=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(lowerCAmelCase__) ] snake_case_ = torch.cat(lowerCAmelCase__, dim=0) else: snake_case_ = self.vae.encode(lowerCAmelCase__).latent_dist.sample(lowerCAmelCase__) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor snake_case_ = 0.18215 * init_latents snake_case_ = init_latents.repeat_interleave(lowerCAmelCase__, dim=0) snake_case_ = randn_tensor(init_latents.shape, generator=lowerCAmelCase__, device=lowerCAmelCase__, dtype=lowerCAmelCase__) # get latents snake_case_ = self.scheduler.add_noise(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = init_latents return latents def a_ ( self, lowerCAmelCase__) -> Tuple: snake_case_ = self.coca_transform(lowerCAmelCase__).unsqueeze(0) with torch.no_grad(), torch.cuda.amp.autocast(): snake_case_ = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype)) snake_case_ = self.coca_tokenizer.decode(generated[0].cpu().numpy()) return generated.split('<end_of_text>')[0].replace('<start_of_text>', '').rstrip(' .,') def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = self.feature_extractor.preprocess(lowerCAmelCase__) snake_case_ = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half() snake_case_ = self.clip_model.get_image_features(lowerCAmelCase__) snake_case_ = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=lowerCAmelCase__) snake_case_ = image_embeddings_clip.repeat_interleave(lowerCAmelCase__, dim=0) return image_embeddings_clip @torch.enable_grad() def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Any: snake_case_ = latents.detach().requires_grad_() snake_case_ = self.scheduler.scale_model_input(lowerCAmelCase__, lowerCAmelCase__) # predict the noise residual snake_case_ = self.unet(lowerCAmelCase__, lowerCAmelCase__, encoder_hidden_states=lowerCAmelCase__).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)): snake_case_ = 
self.scheduler.alphas_cumprod[timestep] snake_case_ = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf snake_case_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 snake_case_ = torch.sqrt(lowerCAmelCase__) snake_case_ = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, lowerCAmelCase__): snake_case_ = self.scheduler.sigmas[index] snake_case_ = latents - sigma * noise_pred else: raise ValueError(f'scheduler type {type(self.scheduler)} not supported') # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor snake_case_ = 1 / 0.18215 * sample snake_case_ = self.vae.decode(lowerCAmelCase__).sample snake_case_ = (image / 2 + 0.5).clamp(0, 1) snake_case_ = transforms.Resize(self.feature_extractor_size)(lowerCAmelCase__) snake_case_ = self.normalize(lowerCAmelCase__).to(latents.dtype) snake_case_ = self.clip_model.get_image_features(lowerCAmelCase__) snake_case_ = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=lowerCAmelCase__) snake_case_ = spherical_dist_loss(lowerCAmelCase__, lowerCAmelCase__).mean() * clip_guidance_scale snake_case_ = -torch.autograd.grad(lowerCAmelCase__, lowerCAmelCase__)[0] if isinstance(self.scheduler, lowerCAmelCase__): snake_case_ = latents.detach() + grads * (sigma**2) snake_case_ = noise_pred_original else: snake_case_ = noise_pred_original - torch.sqrt(lowerCAmelCase__) * grads return noise_pred, latents @torch.no_grad() def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = 512, lowerCAmelCase__ = 512, lowerCAmelCase__ = 0.6, lowerCAmelCase__ = 50, lowerCAmelCase__ = 7.5, lowerCAmelCase__ = 1, lowerCAmelCase__ = 0.0, lowerCAmelCase__ = 100, lowerCAmelCase__ = None, lowerCAmelCase__ = "pil", lowerCAmelCase__ = True, lowerCAmelCase__ = 0.8, lowerCAmelCase__ = 0.1, lowerCAmelCase__ = 0.1, ) -> List[str]: if isinstance(lowerCAmelCase__, lowerCAmelCase__) and len(lowerCAmelCase__) != batch_size: raise ValueError(f'You have passed {batch_size} batch_size, but only {len(lowerCAmelCase__)} generators.') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.') if isinstance(lowerCAmelCase__, torch.Generator) and batch_size > 1: snake_case_ = [generator] + [None] * (batch_size - 1) snake_case_ = [ ('model', self.coca_model is None), ('tokenizer', self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] snake_case_ = [x[0] for x in coca_is_none if x[1]] snake_case_ = ', '.join(lowerCAmelCase__) # generate prompts with coca model if prompt is None if content_prompt is None: if len(lowerCAmelCase__): raise ValueError( f'Content prompt is None and CoCa [{coca_is_none_str}] is None.' f'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.') snake_case_ = self.get_image_description(lowerCAmelCase__) if style_prompt is None: if len(lowerCAmelCase__): raise ValueError( f'Style prompt is None and CoCa [{coca_is_none_str}] is None.' 
f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.') snake_case_ = self.get_image_description(lowerCAmelCase__) # get prompt text embeddings for content and style snake_case_ = self.tokenizer( lowerCAmelCase__, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCAmelCase__, return_tensors='pt', ) snake_case_ = self.text_encoder(content_text_input.input_ids.to(self.device))[0] snake_case_ = self.tokenizer( lowerCAmelCase__, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=lowerCAmelCase__, return_tensors='pt', ) snake_case_ = self.text_encoder(style_text_input.input_ids.to(self.device))[0] snake_case_ = slerp(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) # duplicate text embeddings for each generation per prompt snake_case_ = text_embeddings.repeat_interleave(lowerCAmelCase__, dim=0) # set timesteps snake_case_ = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) snake_case_ = {} if accepts_offset: snake_case_ = 1 self.scheduler.set_timesteps(lowerCAmelCase__, **lowerCAmelCase__) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device) snake_case_ , snake_case_ = self.get_timesteps(lowerCAmelCase__, lowerCAmelCase__, self.device) snake_case_ = timesteps[:1].repeat(lowerCAmelCase__) # Preprocess image snake_case_ = preprocess(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.prepare_latents( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, text_embeddings.dtype, self.device, lowerCAmelCase__) snake_case_ = preprocess(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.prepare_latents( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, text_embeddings.dtype, self.device, lowerCAmelCase__) snake_case_ = slerp(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) if clip_guidance_scale > 0: snake_case_ = self.get_clip_image_embeddings(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.get_clip_image_embeddings(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = slerp( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. snake_case_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: snake_case_ = content_text_input.input_ids.shape[-1] snake_case_ = self.tokenizer([''], padding='max_length', max_length=lowerCAmelCase__, return_tensors='pt') snake_case_ = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt snake_case_ = uncond_embeddings.repeat_interleave(lowerCAmelCase__, dim=0) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes snake_case_ = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
snake_case_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8) snake_case_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps snake_case_ = torch.randn(lowerCAmelCase__, generator=lowerCAmelCase__, device='cpu', dtype=lowerCAmelCase__).to( self.device) else: snake_case_ = torch.randn(lowerCAmelCase__, generator=lowerCAmelCase__, device=self.device, dtype=lowerCAmelCase__) else: if latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}') snake_case_ = latents.to(self.device) # scale the initial noise by the standard deviation required by the scheduler snake_case_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] snake_case_ = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) snake_case_ = {} if accepts_eta: snake_case_ = eta # check if the scheduler accepts generator snake_case_ = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: snake_case_ = generator with self.progress_bar(total=lowerCAmelCase__): for i, t in enumerate(lowerCAmelCase__): # expand the latents if we are doing classifier free guidance snake_case_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents snake_case_ = self.scheduler.scale_model_input(lowerCAmelCase__, lowerCAmelCase__) # predict the noise residual snake_case_ = self.unet(lowerCAmelCase__, lowerCAmelCase__, encoder_hidden_states=lowerCAmelCase__).sample # perform classifier free guidance if do_classifier_free_guidance: snake_case_ , snake_case_ = noise_pred.chunk(2) snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: snake_case_ = ( text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings ) snake_case_ , snake_case_ = self.cond_fn( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) # compute the previous noisy sample x_t -> x_t-1 snake_case_ = self.scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor snake_case_ = 1 / 0.18215 * latents snake_case_ = self.vae.decode(lowerCAmelCase__).sample snake_case_ = (image / 2 + 0.5).clamp(0, 1) snake_case_ = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": snake_case_ = self.numpy_to_pil(lowerCAmelCase__) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=lowerCAmelCase__, nsfw_content_detected=lowerCAmelCase__)
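# --- Editor's sanity check (not part of the original pipeline file) ---
# The spherical interpolation helper defined near the top of this file (named
# `slerp` in the upstream community pipeline; the name here is mangled) can be
# exercised in isolation: at t=0 and t=1 it returns the endpoints, and midway
# between two orthogonal unit vectors it stays on the unit circle.
#
#     import numpy as np
#     v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
#     mid = slerp(0.5, v0, v1)   # ~[0.7071, 0.7071]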
"""simple docstring""" from math import pi def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000x1000 grid whose rows and columns decrease strictly."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasing array via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers, so return the last index of the array + 1, i.e. its length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives by narrowing a per-row upper bound with binary search."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every element of the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, stopping at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting strategies against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
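# Editor's worked example on the first test grid above; all three counting
# strategies must agree on it.
#
#     >>> g = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
#     >>> (count_negatives_binary_search(g),
#     ...  count_negatives_brute_force(g),
#     ...  count_negatives_brute_force_with_break(g))
#     (8, 8, 8)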
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ : Tuple = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 
'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''facebook/nllb-200-distilled-600M''' snake_case__ : Union[str, Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. 
It returns the text translated in `tgt_lang`.''' ) snake_case__ : Optional[Any] = '''translator''' snake_case__ : Tuple = AutoTokenizer snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM snake_case__ : Dict = LANGUAGE_CODES snake_case__ : str = ['''text''', '''text''', '''text'''] snake_case__ : Tuple = ['''text'''] def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: if src_lang not in self.lang_to_code: raise ValueError(F"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(F"""{tgt_lang} is not a supported language.""" ) a_ : str = self.lang_to_code[src_lang] a_ : Any = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( SCREAMING_SNAKE_CASE__ , return_tensors='pt' , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: return self.model.generate(**SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
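# --- Editor's usage sketch (not part of the original tool file) ---
# Upstream this class is named `TranslationTool`; that name is an assumption
# here, since the class name above is mangled.
#
#     tool = TranslationTool()
#     print(tool("Bonjour, le monde !", src_lang="French", tgt_lang="English"))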
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a grayscale image computed from an RGB image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image thresholded from a grayscale image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image by ``kernel``."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image into the padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
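# Editor's quick check: dilating a single on-pixel with the cross-shaped
# structuring element grows it into a plus-shaped blob.
#
#     img = np.zeros((5, 5)); img[2, 2] = 1
#     cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
#     dilation(img, cross)   # plus sign centered at (2, 2)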
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
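# Illustrative sketch (added; not from the original file): `_LazyModule` above
# defers heavy imports until an attribute is first accessed. A minimal
# stand-alone equivalent can be built with PEP 562 module-level `__getattr__`;
# the package and symbol names below are hypothetical:
#
#   # my_package/__init__.py
#   import importlib
#
#   _import_structure = {"heavy_module": ["HeavyClass"]}
#
#   def __getattr__(name):
#       for module_name, symbols in _import_structure.items():
#           if name in symbols:
#               module = importlib.import_module(f".{module_name}", __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")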
import random
import unittest

import numpy as np

import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax

if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch


def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape, with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return np.array(values, dtype=jnp.int32).reshape(shape)


def random_attention_mask(input_shape, rng=None):
    attn_mask = ids_tensor(input_shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
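# Usage sketch (added; assumes network access to the Hugging Face Hub): the
# fast tokenizer produces the [CLS] ... [SEP] ... [SEP] layout built by
# `build_inputs_with_special_tokens` above, with token_type_ids of 0 for the
# first segment and 1 for the second.
#
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   enc = tok("hello world", "second segment")
#   print(enc["input_ids"])
#   print(enc["token_type_ids"])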
"""Convert a fairseq UniSpeech checkpoint to the Hugging Face format."""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
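# Example invocation (added for illustration; the flags match the argparse
# setup above, the file paths are placeholders):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-hf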
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging

logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a single downloaded artifact."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir", type=str, required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets", default="DeprecationWarning,UserWarning,FutureWarning", type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh", action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
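# Example invocation (added; flags mirror the argparse definitions above, the
# run id and token are placeholders):
#
#   python extract_warnings.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ./ci_artifacts \
#       --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning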
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
                 do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None,
                   pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
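# Quick check of the padding arithmetic above (added sketch): heights and
# widths are rounded up to the next multiple of `pad_size`, so 518 -> 520 and
# 730 -> 736 with size 8. Note that with this formula an exact multiple (e.g.
# 512) still gains a full extra block of padding (512 -> 520).
if __name__ == "__main__":
    size = 8
    for old in (518, 730, 512):
        padded = old + ((old // size + 1) * size - old)
        print(old, "->", padded)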
# Notebook header for the Italian documentation build (content intentionally in Italian).
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""simple docstring""" import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __snake_case = { '''gwf-440k''': { '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''', '''sample_rate''': 48000, '''sample_size''': 65536, }, '''jmann-small-190k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''', '''sample_rate''': 48000, '''sample_size''': 65536, }, '''jmann-large-580k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''', '''sample_rate''': 48000, '''sample_size''': 131072, }, '''maestro-uncond-150k''': { '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''', '''sample_rate''': 16000, '''sample_size''': 65536, }, '''unlocked-uncond-250k''': { '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''', '''sample_rate''': 16000, '''sample_size''': 65536, }, '''honk-140k''': { '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''', '''sample_rate''': 16000, '''sample_size''': 65536, }, } def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : Tuple ): """simple docstring""" return torch.atana(UpperCamelCase__, UpperCamelCase__ ) / math.pi * 2 def A_ ( _lowerCAmelCase : List[Any] ): """simple docstring""" _a = torch.sin(t * math.pi / 2 ) ** 2 _a = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(UpperCamelCase__, UpperCamelCase__ ) class __lowerCamelCase ( snake_case_ ): '''simple docstring''' pass class __lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> Union[str, Any]: super().__init__() _a = DiffusionAttnUnetaD(UpperCAmelCase__ , n_attn_layers=4 ) _a = deepcopy(self.diffusion ) _a = torch.quasirandom.SobolEngine(1 , scramble=UpperCAmelCase__ ) def A_ ( _lowerCAmelCase : Any ): """simple docstring""" _a = MODELS_MAP[model_name]["""url"""] os.system(f'wget {url} ./' ) return f'./{model_name}.ckpt' __snake_case = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', } __snake_case = { '''8''': '''resnets.0''', '''9''': '''attentions.0''', '''10''': '''resnets.1''', '''11''': '''attentions.1''', '''12''': '''resnets.2''', '''13''': '''attentions.2''', } __snake_case = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', '''8''': '''resnets.3''', '''9''': '''attentions.3''', '''10''': '''resnets.4''', '''11''': '''attentions.4''', '''12''': '''resnets.5''', '''13''': '''attentions.5''', } __snake_case = { '''0''': '''resnets.0''', '''1''': '''resnets.1''', '''2''': '''resnets.2''', '''4''': '''resnets.0''', '''5''': '''resnets.1''', '''6''': '''resnets.2''', } __snake_case = { '''skip''': '''conv_skip''', '''main.0''': '''conv_1''', '''main.1''': '''group_norm_1''', '''main.3''': '''conv_2''', '''main.4''': '''group_norm_2''', } __snake_case = { '''norm''': '''group_norm''', '''qkv_proj''': ['''query''', '''key''', '''value'''], '''out_proj''': ['''proj_attn'''], } def A_ ( _lowerCAmelCase : Union[str, Any] ): """simple docstring""" if name.startswith('''skip''' ): return name.replace('''skip''', RES_CONV_MAP['''skip'''] ) # name has to be of format main.{digit} if not 
name.startswith('''main.''' ): raise ValueError(f'ResConvBlock error with {name}' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def A_ ( _lowerCAmelCase : Dict ): """simple docstring""" for key, value in ATTN_MAP.items(): if name.startswith(UpperCamelCase__ ) and not isinstance(UpperCamelCase__, UpperCamelCase__ ): return name.replace(UpperCamelCase__, UpperCamelCase__ ) elif name.startswith(UpperCamelCase__ ): return [name.replace(UpperCamelCase__, UpperCamelCase__ ) for v in value] raise ValueError(f'Attn error with {name}' ) def A_ ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : Optional[int]=13 ): """simple docstring""" _a = input_string if string.split('''.''' )[0] == "timestep_embed": return string.replace('''timestep_embed''', '''time_proj''' ) _a = 0 if string.startswith('''net.3.''' ): depth += 1 _a = string[6:] elif string.startswith('''net.''' ): _a = string[4:] while string.startswith('''main.7.''' ): depth += 1 _a = string[7:] if string.startswith('''main.''' ): _a = string[5:] # mid block if string[:2].isdigit(): _a = string[:2] _a = string[2:] else: _a = string[0] _a = string[1:] if depth == max_depth: _a = MID_NUM_TO_LAYER[layer_num] _a = """mid_block""" elif depth > 0 and int(UpperCamelCase__ ) < 7: _a = DOWN_NUM_TO_LAYER[layer_num] _a = f'down_blocks.{depth}' elif depth > 0 and int(UpperCamelCase__ ) > 7: _a = UP_NUM_TO_LAYER[layer_num] _a = f'up_blocks.{max_depth - depth - 1}' elif depth == 0: _a = DEPTH_0_TO_LAYER[layer_num] _a = f'up_blocks.{max_depth - 1}' if int(UpperCamelCase__ ) > 3 else """down_blocks.0""" if not string_left.startswith('''.''' ): raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' ) _a = string_left[1:] if "resnets" in new_layer: _a = convert_resconv_naming(UpperCamelCase__ ) elif "attentions" in new_layer: _a = convert_attn_naming(UpperCamelCase__ ) _a = new_string_left if not isinstance(UpperCamelCase__, UpperCamelCase__ ): _a = prefix + """.""" + new_layer + """.""" + string_left else: _a = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def A_ ( _lowerCAmelCase : str ): """simple docstring""" _a = {} for k, v in state_dict.items(): if k.endswith('''kernel''' ): # up- and downsample layers, don't have trainable weights continue _a = rename(UpperCamelCase__ ) # check if we need to transform from Conv => Linear for attention if isinstance(UpperCamelCase__, UpperCamelCase__ ): _a = transform_conv_attns(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) else: _a = v return new_state_dict def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ): """simple docstring""" if len(UpperCamelCase__ ) == 1: if len(v.shape ) == 3: # weight _a = v[:, :, 0] else: # bias _a = v else: # qkv matrices _a = v.shape[0] _a = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _a = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _a = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def A_ ( _lowerCAmelCase : int ): """simple docstring""" _a = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) _a = args.model_path.split('''/''' )[-1].split('''.''' )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}' _a = download(UpperCamelCase__ ) _a = MODELS_MAP[model_name]["""sample_rate"""] _a = MODELS_MAP[model_name]["""sample_size"""] _a = Object() _a = sample_size _a = sample_rate 
_a = 0 _a = UNetaDModel(sample_size=UpperCamelCase__, sample_rate=UpperCamelCase__ ) _a = diffusers_model.state_dict() _a = DiffusionUncond(UpperCamelCase__ ) orig_model.load_state_dict(torch.load(args.model_path, map_location=UpperCamelCase__ )['''state_dict'''] ) _a = orig_model.diffusion_ema.eval() _a = orig_model.state_dict() _a = rename_orig_weights(UpperCamelCase__ ) _a = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _a = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(UpperCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}' assert all(k.endswith('''kernel''' ) for k in list(UpperCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}' if key == "time_proj.weight": _a = value.squeeze() _a = value diffusers_model.load_state_dict(UpperCamelCase__ ) _a = 1_00 _a = 33 _a = IPNDMScheduler(num_train_timesteps=UpperCamelCase__ ) _a = torch.manual_seed(UpperCamelCase__ ) _a = torch.randn([1, 2, config.sample_size], generator=UpperCamelCase__ ).to(UpperCamelCase__ ) _a = torch.linspace(1, 0, steps + 1, device=UpperCamelCase__ )[:-1] _a = get_crash_schedule(UpperCamelCase__ ) _a = DanceDiffusionPipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ ) _a = torch.manual_seed(33 ) _a = pipe(num_inference_steps=UpperCamelCase__, generator=UpperCamelCase__ ).audios _a = sampling.iplms_sample(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, {} ) _a = generated.clamp(-1, 1 ) _a = (generated - audio).abs().sum() _a = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('''Diff sum''', UpperCamelCase__ ) print('''Diff max''', UpperCamelCase__ ) assert diff_max < 1e-3, f'Diff max: {diff_max} is too much :-/' print(f'Conversion for {model_name} successful!' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') __snake_case = parser.parse_args() main(args)
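# Example invocation (added; flags match the argparse setup above, and the
# model name must be one of the MODELS_MAP keys, which is then downloaded
# automatically):
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./dance-diffusion-gwf-440k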
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' # Check if the input is valid if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3: raise ValueError("""Please enter a valid equation.""" ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("""Both a & b of two equations can't be zero.""" ) # Extract the coefficients _a , _a , _a : Any = equationa _a , _a , _a : Tuple = equationa # Calculate the determinants of the matrices _a : int = aa * ba - aa * ba _a : str = ca * ba - ca * ba _a : str = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("""Infinite solutions. (Consistent system)""" ) else: raise ValueError("""No solution. (Inconsistent system)""" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: _a : Dict = determinant_x / determinant _a : str = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
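# Usage sketch (added; assumes the laion/clap-htsat-unfused checkpoint is
# reachable, and the exact keyword names are as exercised by the tests above):
# the processor dispatches text to the tokenizer and audio to the feature
# extractor.
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   text_inputs = processor(text="a dog barking", return_tensors="pt")
#   audio_inputs = processor(audios=[[0.0] * 48000], return_tensors="pt")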
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a
simple input 1', 'This is a simple input 2'] __a : List[Any] = tokenizer.bos_token_id __a : List[str] = tokenizer(__a ) __a : Optional[Any] = tokenizer(__a ) self.assertEqual(out_s.input_ids[0] , __a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __a : Any = tokenizer.decode(out_s.input_ids ) __a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' ) __a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' __a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b' __a : Optional[int] = tokenizer.encode(__a ) __a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n'] __a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a ) self.assertEqual(__a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' pass
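# A minimal usage sketch of the `truncate_before_pattern` behaviour the slow test
# above exercises. Assumes network access to the "Salesforce/codegen-350M-mono"
# checkpoint; the regex list mirrors the one in the test.
if __name__ == "__main__":
    import re

    from transformers import CodeGenTokenizer

    tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    ids = tok.encode("\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#")
    # Decoding stops before the first comment marker, docstring opener, or blank-line run.
    print(tok.decode(ids, truncate_before_pattern=["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]))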
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : Optional[Any] , ): a : List[str] = parent a : Optional[Any] = 13 a : Dict = 7 a : Dict = 30 a : Optional[int] = self.seq_length + self.mem_len a : Optional[int] = 15 a : Dict = True a : List[str] = True a : Union[str, Any] = 99 a : Any = [10, 50, 80] a : Dict = 32 a : Dict = 32 a : Optional[int] = 4 a : List[Any] = 8 a : List[Any] = 128 a : Optional[int] = 2 a : List[Any] = 2 a : Tuple = None a : str = 1 a : Dict = 0 a : Any = 3 a : Any = self.vocab_size - 1 a : Optional[int] = 0.01 def __snake_case ( self : Union[str, Any]): a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a : Any = None if self.use_labels: a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a : Tuple = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def __snake_case ( self : List[str]): random.seed(self.seed) tf.random.set_seed(self.seed) def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int): a : List[str] = TFTransfoXLModel(__UpperCAmelCase) a , a : List[str] = model(__UpperCAmelCase).to_tuple() a : Optional[Any] = {"input_ids": input_ids_a, "mems": mems_a} a , a : Union[str, Any] = model(__UpperCAmelCase).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any]): a : List[Any] = TFTransfoXLLMHeadModel(__UpperCAmelCase) a , a : Optional[Any] = model(__UpperCAmelCase).to_tuple() a : Optional[int] = {"input_ids": input_ids_a, "labels": lm_labels} a , a : Optional[int] = model(__UpperCAmelCase).to_tuple() a , a : Optional[int] = model([input_ids_a, mems_a]).to_tuple() a : Tuple = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels} a , a : List[str] = model(__UpperCAmelCase).to_tuple() self.parent.assertEqual(lm_logits_a.shape , 
(self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __snake_case ( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]): a : Dict = TFTransfoXLForSequenceClassification(__UpperCAmelCase) a : int = model(__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def __snake_case ( self : Union[str, Any]): a : Optional[Any] = self.prepare_config_and_inputs() ((a) , (a) , (a) , (a)) : List[Any] = config_and_inputs a : List[str] = {"input_ids": input_ids_a} return config, inputs_dict @require_tf class _A ( _a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Any = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) UpperCAmelCase : List[Any] = () if is_tf_available() else () UpperCAmelCase : Tuple = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented UpperCAmelCase : Any = False UpperCAmelCase : int = False UpperCAmelCase : Dict = False UpperCAmelCase : Any = False def __snake_case ( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int]): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def __snake_case ( self : Any): a : Any = TFTransfoXLModelTester(self) a : Any = ConfigTester(self , config_class=__UpperCAmelCase , d_embed=37) def __snake_case ( self : int): self.config_tester.run_common_tests() def __snake_case ( self : int): self.model_tester.set_seed() a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*__UpperCAmelCase) def __snake_case ( self : str): self.model_tester.set_seed() a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*__UpperCAmelCase) def __snake_case ( self : Optional[int]): a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__UpperCAmelCase) def __snake_case ( self : Tuple): a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() a : Optional[Any] = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: a : Optional[int] = model_class(__UpperCAmelCase) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer) if model_class in list_other_models_with_output_ebd: a : Optional[int] = model.get_output_embeddings() assert isinstance(__UpperCAmelCase , tf.keras.layers.Layer) a : List[str] = model.get_bias() assert name is None else: a : List[str] = model.get_output_embeddings() assert x is None a : Optional[int] = model.get_bias() assert name is None def __snake_case ( self : Optional[Any]): # TODO JP: Make TransfoXL XLA compliant pass @slow def __snake_case ( self : str): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Tuple = TFTransfoXLModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.") def __snake_case ( self : Tuple): pass @require_tf class _A ( unittest.TestCase ): """simple docstring""" @unittest.skip("Skip test until #12651 is resolved.") @slow def __snake_case ( self : List[str]): a : int = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") # fmt: off a : Tuple = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off a : Union[str, Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> a : Optional[int] = model.generate(__UpperCAmelCase , max_length=200 , do_sample=__UpperCAmelCase) self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase)
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast ConvBERT tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the
        # requested options (lowercasing, accent stripping, Chinese characters).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # [CLS] A [SEP] for single sequences, [CLS] A [SEP] B [SEP] for pairs.
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # 0 for everything up to and including the first [SEP], 1 afterwards.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
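# A small usage sketch for the fast tokenizer above (assumes the
# "YituTech/conv-bert-base" checkpoint is reachable; outputs are illustrative).
if __name__ == "__main__":
    from transformers import ConvBertTokenizerFast

    tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
    enc = tok("a first sentence", "a second sentence")
    # token_type_ids are 0 over `[CLS] A [SEP]` and 1 over `B [SEP]`, matching
    # create_token_type_ids_from_sequences above.
    print(enc["input_ids"])
    print(enc["token_type_ids"])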
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into features and regression targets.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset and hold out a quarter for evaluation.
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
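# Optional extension, not part of the original script: a quick 5-fold estimate of the
# same regressor's generalisation error. `cross_validated_mae` is a hypothetical helper
# built only from the names defined above plus sklearn's cross_val_score.
def cross_validated_mae() -> float:
    from sklearn.model_selection import cross_val_score

    data, target = data_handling(fetch_california_housing())
    scores = cross_val_score(
        XGBRegressor(verbosity=0, random_state=42),
        data,
        target,
        scoring="neg_mean_absolute_error",
        cv=5,
    )
    # cross_val_score returns negated errors, so flip the sign back.
    return -scores.mean()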
# Functions to print the upper and lower halves of a diamond (pyramid).


def floyd(n: int) -> None:
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/  \| |- |_ |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
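# Illustrative output (traced by hand) for pretty_print(3); trailing spaces omitted:
#
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *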
"""Extract a subset of layers from a full RobertaForMaskedLM or GPT2LMHeadModel
to initialise a student model for transfer-learned distillation."""

import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
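# Example invocation, assuming the script above is saved as extract.py (the file name
# is an assumption; the flag values are the script's own defaults):
#
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform
#
# This keeps teacher layers [0, 2, 4, 7, 9, 11] and renumbers them 0..5, yielding an
# initialisation for a 6-layer student.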
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase_ ( __lowercase : int , __lowercase : Dict , __lowercase : str , __lowercase : Optional[Any] , __lowercase : str ) -> List[str]: '''simple docstring''' _UpperCAmelCase = TapasConfig.from_json_file(__lowercase ) # set absolute/relative position embeddings parameter _UpperCAmelCase = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase ) elif task == "WTQ": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = True # hparam_utils.py hparams _UpperCAmelCase = 0.66_4694 _UpperCAmelCase = 0.20_7951 _UpperCAmelCase = 0.12_1194 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = 0.035_2513 _UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _UpperCAmelCase = 4 _UpperCAmelCase = False # hparam_utils.py hparams _UpperCAmelCase = 36.4519 _UpperCAmelCase = 0.90_3421 _UpperCAmelCase = 222.088 _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = 0.76_3141 _UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase ) elif task == "TABFACT": _UpperCAmelCase = TapasForSequenceClassification(config=__lowercase ) elif task == "MLM": _UpperCAmelCase = TapasForMaskedLM(config=__lowercase ) elif task == "INTERMEDIATE_PRETRAINING": _UpperCAmelCase = TapasModel(config=__lowercase ) else: raise ValueError(f'Task {task} not supported.' ) print(f'Building PyTorch model from configuration: {config}' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__lowercase , __lowercase , __lowercase ) # Save pytorch-model (weights and configuration) print(f'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(__lowercase ) # Save tokenizer files print(f'Save tokenizer files to {pytorch_dump_path}' ) _UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(__lowercase ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __SCREAMING_SNAKE_CASE :List[str] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """
    Return the prime factorisation of a positive integer in non-decreasing order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(360)
    [2, 2, 2, 3, 3, 5]
    """
    i = 2
    factors = []
    # Trial division only needs divisors up to sqrt(n); whatever remains
    # above 1 afterwards is itself prime.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def merge_sort(collection: list) -> list:
    """
    Sort by repeatedly extracting the minimum and maximum of the remainder.

    >>> merge_sort([5, 1, 4, 2, 3])
    [1, 2, 3, 4, 5]
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
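# Complexity note for merge_sort above: each pass removes one minimum and one maximum
# via linear scans and list.remove, so the overall cost is O(n^2) -- fine for the
# small interactive inputs this script targets, but not for large arrays.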
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
def gnome_sort(lst: list) -> list:
    """
    Sort a list in place using the gnome sort algorithm and return it.

    >>> gnome_sort([3, 1, 2])
    [1, 2, 3]
    """
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Swap the out-of-order pair and step back to re-check.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
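# Complexity note for gnome_sort above: the index walks forward while adjacent pairs
# are ordered and steps back one position after every swap, like insertion sort with
# swaps instead of shifts: O(n^2) worst case, O(n) on already-sorted input.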
"""simple docstring""" import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=9_9 , lowerCAmelCase_ : List[str]=6_4 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=3_7 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : int=5_1_2 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Dict=None , ): """simple docstring""" lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = is_training lowercase_ = use_input_mask lowercase_ = use_token_type_ids lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = embedding_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = num_labels lowercase_ = num_choices lowercase_ = scope def _UpperCAmelCase ( self : Tuple): """simple docstring""" lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase_ = None if self.use_input_mask: lowercase_ = random_attention_mask([self.batch_size, self.seq_length]) lowercase_ = None if self.use_token_type_ids: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowercase_ = None lowercase_ = None lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase_ = ids_tensor([self.batch_size] , self.num_choices) lowercase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict): """simple docstring""" lowercase_ = MegatronBertModel(config=lowerCAmelCase_) model.to(lowerCAmelCase_) model.eval() lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_) lowercase_ = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_) lowercase_ = model(lowerCAmelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]): """simple docstring""" lowercase_ = MegatronBertForMaskedLM(config=lowerCAmelCase_) model.to(lowerCAmelCase_) model.eval() lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]): """simple docstring""" lowercase_ = MegatronBertForCausalLM(config=lowerCAmelCase_) model.to(lowerCAmelCase_) model.eval() lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]): """simple docstring""" lowercase_ = MegatronBertForNextSentencePrediction(config=lowerCAmelCase_) model.to(lowerCAmelCase_) model.eval() lowercase_ = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any]): """simple docstring""" lowercase_ = MegatronBertForPreTraining(config=lowerCAmelCase_) model.to(lowerCAmelCase_) model.eval() lowercase_ = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , 
(self.batch_size, 2)) def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple): """simple docstring""" lowercase_ = MegatronBertForQuestionAnswering(config=lowerCAmelCase_) model.to(lowerCAmelCase_) model.eval() lowercase_ = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str]): """simple docstring""" lowercase_ = self.num_labels lowercase_ = MegatronBertForSequenceClassification(lowerCAmelCase_) model.to(lowerCAmelCase_) model.eval() lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict): """simple docstring""" lowercase_ = self.num_labels lowercase_ = MegatronBertForTokenClassification(config=lowerCAmelCase_) model.to(lowerCAmelCase_) model.eval() lowercase_ = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]): """simple docstring""" lowercase_ = self.num_choices lowercase_ = MegatronBertForMultipleChoice(config=lowerCAmelCase_) model.to(lowerCAmelCase_) model.eval() lowercase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase_ = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _UpperCAmelCase ( self : List[Any]): """simple docstring""" lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): lowercase__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, 
MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = True # test_resize_embeddings = False lowercase__ = False def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]=False): """simple docstring""" lowercase_ = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_) if return_labels: if model_class in get_values(lowerCAmelCase_): lowercase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_) lowercase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_) return inputs_dict def _UpperCAmelCase ( self : int): """simple docstring""" lowercase_ = MegatronBertModelTester(self) lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7) def _UpperCAmelCase ( self : Dict): """simple docstring""" self.config_tester.run_common_tests() def _UpperCAmelCase ( self : Tuple): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase_) def _UpperCAmelCase ( self : str): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase_) def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase_) def _UpperCAmelCase ( self : List[Any]): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase_) def _UpperCAmelCase ( self : List[str]): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase_) def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase_) def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase_) def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase_) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int: '''simple docstring''' return torch.tensor( __lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase , ) UpperCAmelCase : Any = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow @unittest.skip("""Model is not available.""") def _UpperCAmelCase 
( self : Optional[int]): """simple docstring""" lowercase_ = """nvidia/megatron-bert-uncased-345m""" if "MYDIR" in os.environ: lowercase_ = os.path.join(os.environ["""MYDIR"""] , lowerCAmelCase_) lowercase_ = MegatronBertModel.from_pretrained(lowerCAmelCase_) model.to(lowerCAmelCase_) model.half() lowercase_ = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]]) with torch.no_grad(): lowercase_ = model(lowerCAmelCase_)[0] lowercase_ = torch.Size((1, 9, 1_0_2_4)) self.assertEqual(output.shape , lowerCAmelCase_) lowercase_ = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728] for ii in range(3): for jj in range(3): lowercase_ = output[0, ii, jj] lowercase_ = expected[3 * ii + jj] lowercase_ = """ii={} jj={} a={} b={}""".format(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) self.assertTrue(math.isclose(lowerCAmelCase_ , lowerCAmelCase_ , rel_tol=lowerCAmelCase_ , abs_tol=lowerCAmelCase_) , msg=lowerCAmelCase_)
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    """Pipeline for SHA-1 hashing a bytestring."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate a 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad to a multiple of 64 bytes, appending the message bit-length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
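# Quick cross-check against hashlib, mirroring test_sha1_hash above; purely
# illustrative and safe to delete.
def sha1_matches_hashlib(msg: bytes = b"abc") -> bool:
    import hashlib as _hashlib

    return SHA1Hash(msg).final_hash() == _hashlib.sha1(msg).hexdigest()  # noqa: S324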
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A logging adapter that is aware of distributed processes."""

    @staticmethod
    def _should_log(main_process_only):
        # Emit only on the main process unless explicitly asked otherwise.
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                # Log from each process in rank order, one at a time.
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
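# A minimal consumption sketch for the adapter above (assumes the usual `accelerate`
# package layout, i.e. this module is importable as accelerate.logging):
#
#   from accelerate.logging import get_logger
#
#   logger = get_logger(__name__)
#   logger.info("printed once, on the main process only")
#   logger.info("printed on every process", main_process_only=False)
#   logger.debug("printed rank by rank", main_process_only=False, in_order=True)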
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
'''simple docstring''' import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class __magic_name__ ( _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : Optional[int] = BarthezTokenizer lowerCAmelCase : int = BarthezTokenizerFast lowerCAmelCase : Dict = True lowerCAmelCase : str = True def __lowercase ( self : List[Any] ): super().setUp() _a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase ) _a : Union[str, Any] = tokenizer def __lowercase ( self : Tuple ): _a : Optional[Any] = '<pad>' _a : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase ) def __lowercase ( self : str ): _a : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<s>' ) self.assertEqual(vocab_keys[1] ,'<pad>' ) self.assertEqual(vocab_keys[-1] ,'<mask>' ) self.assertEqual(len(_UpperCAmelCase ) ,101122 ) def __lowercase ( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size ,101122 ) @require_torch def __lowercase ( self : Dict ): _a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _a : Dict = [0, 57, 3018, 70307, 91, 2] _a : Dict = self.tokenizer( _UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) self.assertEqual((2, 6) ,batch.input_ids.shape ) self.assertEqual((2, 6) ,batch.attention_mask.shape ) _a : Tuple = batch.input_ids.tolist()[0] self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ): if not self.test_rust_tokenizer: return _a : str = self.get_tokenizer() _a : List[str] = self.get_rust_tokenizer() _a : Dict = 'I was born in 92000, and this is falsé.' 
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase ) _a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Union[str, Any] = self.get_rust_tokenizer() _a : Any = tokenizer.encode(_UpperCAmelCase ) _a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) @slow def __lowercase ( self : Optional[int] ): # fmt: off _a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _a : Optional[Any] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
89
0
"""simple docstring""" from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def A ( ) -> Union[str, Any]: __UpperCamelCase = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' ) __UpperCamelCase = parser.add_subparsers(help='transformers-cli command helpers' ) # Register commands ConvertCommand.register_subcommand(snake_case ) DownloadCommand.register_subcommand(snake_case ) EnvironmentCommand.register_subcommand(snake_case ) RunCommand.register_subcommand(snake_case ) ServeCommand.register_subcommand(snake_case ) UserCommands.register_subcommand(snake_case ) AddNewModelCommand.register_subcommand(snake_case ) AddNewModelLikeCommand.register_subcommand(snake_case ) LfsCommands.register_subcommand(snake_case ) PTtoTFCommand.register_subcommand(snake_case ) # Let's go __UpperCamelCase = parser.parse_args() if not hasattr(snake_case , 'func' ): parser.print_help() exit(1 ) # Run __UpperCamelCase = args.func(snake_case ) service.run() if __name__ == "__main__": main()
263
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin UpperCamelCase : List[Any] = False @skip_mps class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = StableDiffusionAttendAndExcitePipeline lowercase = False lowercase = TEXT_TO_IMAGE_PARAMS lowercase = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} ) lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def UpperCAmelCase ( cls ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(__UpperCAmelCase ) @classmethod def UpperCAmelCase ( cls ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) __UpperCamelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) __UpperCamelCase = CLIPTextModel(__UpperCAmelCase ) __UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(__UpperCAmelCase ) else: __UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCamelCase = __UpperCamelCase = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = 'cpu' __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = self.pipeline_class(**__UpperCAmelCase ) 
pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __UpperCamelCase = pipe(**__UpperCAmelCase ).images __UpperCamelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) __UpperCamelCase = np.array( [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] ) __UpperCamelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCAmelCase , 1E-3 ) def UpperCAmelCase ( self ): '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def UpperCAmelCase ( self ): '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def UpperCAmelCase ( self ): '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def UpperCAmelCase ( self ): '''simple docstring''' super().test_save_load_local(expected_max_difference=5E-4 ) def UpperCAmelCase ( self ): '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): @classmethod def UpperCAmelCase ( cls ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(__UpperCAmelCase ) @classmethod def UpperCAmelCase ( cls ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = torch.manual_seed(51 ) __UpperCamelCase = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa ) pipe.to('cuda' ) __UpperCamelCase = 'a painting of an elephant with glasses' __UpperCamelCase = [5, 7] __UpperCamelCase = pipe( prompt=__UpperCAmelCase , token_indices=__UpperCAmelCase , guidance_scale=7.5 , generator=__UpperCAmelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0] __UpperCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
263
1
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCAmelCase_ ( A_ ,A_): UpperCamelCase__: List[str] = old_name if "patch_embed" in old_name: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: Dict = old_name.split(".") if layer == "0": UpperCamelCase__: Any = old_name.replace("0" ,"convolution1") elif layer == "1": UpperCamelCase__: str = old_name.replace("1" ,"batchnorm_before") elif layer == "3": UpperCamelCase__: Tuple = old_name.replace("3" ,"convolution2") else: UpperCamelCase__: Optional[Any] = old_name.replace("4" ,"batchnorm_after") if "network" in old_name and re.search(R"\d\.\d" ,A_): UpperCamelCase__: List[Any] = R"\b\d{2}\b" if bool(re.search(A_ ,A_)): UpperCamelCase__: int = re.search(R"\d\.\d\d." ,A_).group() else: UpperCamelCase__: Optional[Any] = re.search(R"\d\.\d." ,A_).group() if int(match[0]) < 6: UpperCamelCase__: Union[str, Any] = old_name.replace(A_ ,"") UpperCamelCase__: Any = trimmed_name.replace("network" ,match[0] + ".meta4D_layers.blocks." + match[2:-1]) UpperCamelCase__: Tuple = "intermediate_stages." + trimmed_name else: UpperCamelCase__: Dict = old_name.replace(A_ ,"") if int(match[2]) < num_meta4D_last_stage: UpperCamelCase__: Optional[int] = trimmed_name.replace("network" ,"meta4D_layers.blocks." + match[2]) else: UpperCamelCase__: Dict = str(int(match[2]) - num_meta4D_last_stage) UpperCamelCase__: List[Any] = trimmed_name.replace("network" ,"meta3D_layers.blocks." + layer_index) if "norm1" in old_name: UpperCamelCase__: int = trimmed_name.replace("norm1" ,"layernorm1") elif "norm2" in old_name: UpperCamelCase__: List[Any] = trimmed_name.replace("norm2" ,"layernorm2") elif "fc1" in old_name: UpperCamelCase__: Optional[Any] = trimmed_name.replace("fc1" ,"linear_in") elif "fc2" in old_name: UpperCamelCase__: List[str] = trimmed_name.replace("fc2" ,"linear_out") UpperCamelCase__: Union[str, Any] = "last_stage." + trimmed_name elif "network" in old_name and re.search(R".\d." ,A_): UpperCamelCase__: List[str] = old_name.replace("network" ,"intermediate_stages") if "fc" in new_name: UpperCamelCase__: List[str] = new_name.replace("fc" ,"convolution") elif ("norm1" in new_name) and ("layernorm1" not in new_name): UpperCamelCase__: List[Any] = new_name.replace("norm1" ,"batchnorm_before") elif ("norm2" in new_name) and ("layernorm2" not in new_name): UpperCamelCase__: int = new_name.replace("norm2" ,"batchnorm_after") if "proj" in new_name: UpperCamelCase__: Union[str, Any] = new_name.replace("proj" ,"projection") if "dist_head" in new_name: UpperCamelCase__: Any = new_name.replace("dist_head" ,"distillation_classifier") elif "head" in new_name: UpperCamelCase__: Dict = new_name.replace("head" ,"classifier") elif "patch_embed" in new_name: UpperCamelCase__: List[str] = "efficientformer." + new_name elif new_name == "norm.weight" or new_name == "norm.bias": UpperCamelCase__: List[Any] = new_name.replace("norm" ,"layernorm") UpperCamelCase__: Optional[int] = "efficientformer." + new_name else: UpperCamelCase__: str = "efficientformer.encoder." 
+ new_name return new_name def lowerCAmelCase_ ( A_ ,A_): for key in checkpoint.copy().keys(): UpperCamelCase__: Optional[int] = checkpoint.pop(A_) UpperCamelCase__: Dict = val return checkpoint def lowerCAmelCase_ ( ): UpperCamelCase__: Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCamelCase__: List[str] = Image.open(requests.get(A_ ,stream=A_).raw) return image def lowerCAmelCase_ ( A_ ,A_ ,A_ ,A_): UpperCamelCase__: List[Any] = torch.load(A_ ,map_location="cpu")["model"] UpperCamelCase__: List[Any] = EfficientFormerConfig.from_json_file(A_) UpperCamelCase__: Dict = EfficientFormerForImageClassificationWithTeacher(A_) UpperCamelCase__: int = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1]) UpperCamelCase__: List[Any] = config.depths[-1] - config.num_metaad_blocks + 1 UpperCamelCase__: Optional[int] = convert_torch_checkpoint(A_ ,A_) model.load_state_dict(A_) model.eval() UpperCamelCase__: Optional[int] = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } # prepare image UpperCamelCase__: Union[str, Any] = prepare_img() UpperCamelCase__: Optional[Any] = 2_56 UpperCamelCase__: Any = 2_24 UpperCamelCase__: List[str] = EfficientFormerImageProcessor( size={"shortest_edge": image_size} ,crop_size={"height": crop_size, "width": crop_size} ,resample=pillow_resamplings["bicubic"] ,) UpperCamelCase__: Optional[Any] = processor(images=A_ ,return_tensors="pt").pixel_values # original processing pipeline UpperCamelCase__: Tuple = Compose( [ Resize(A_ ,interpolation=pillow_resamplings["bicubic"]), CenterCrop(A_), ToTensor(), Normalize(A_ ,A_), ]) UpperCamelCase__: Tuple = image_transforms(A_).unsqueeze(0) assert torch.allclose(A_ ,A_) UpperCamelCase__: List[str] = model(A_) UpperCamelCase__: Dict = outputs.logits UpperCamelCase__: int = (1, 10_00) if "l1" in model_name: UpperCamelCase__: Optional[Any] = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]) assert torch.allclose(logits[0, :10] ,A_ ,atol=1e-3) assert logits.shape == expected_shape elif "l3" in model_name: UpperCamelCase__: List[str] = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]) assert torch.allclose(logits[0, :10] ,A_ ,atol=1e-3) assert logits.shape == expected_shape elif "l7" in model_name: UpperCamelCase__: str = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]) assert logits.shape == expected_shape else: raise ValueError( F"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7") # Save Checkpoints Path(A_).mkdir(exist_ok=A_) model.save_pretrained(A_) print(F"Checkpoint successfuly converted. 
Model saved at {pytorch_dump_path}") processor.save_pretrained(A_) print(F"Processor successfuly saved at {pytorch_dump_path}") if push_to_hub: print("Pushing model to the hub...") model.push_to_hub( repo_id=F"Bearnardd/{pytorch_dump_path}" ,commit_message="Add model" ,use_temp_dir=A_ ,) processor.push_to_hub( repo_id=F"Bearnardd/{pytorch_dump_path}" ,commit_message="Add image processor" ,use_temp_dir=A_ ,) if __name__ == "__main__": A__: Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to EfficientFormer pytorch checkpoint.''', ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The json file for EfficientFormer model config.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) parser.set_defaults(push_to_hub=True) A__: Optional[Any] = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
149
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def lowerCAmelCase_ ( A_ ,A_ ,A_): UpperCamelCase__: Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") UpperCamelCase__: Dict = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(A_): os.makedirs(A_) UpperCamelCase__: Optional[Any] = model.state_dict() def to_tf_var_name(A_): for patt, repl in iter(A_): UpperCamelCase__: Optional[Any] = name.replace(A_ ,A_) return F"bert/{name}" def create_tf_var(A_ ,A_ ,A_): UpperCamelCase__: Any = tf.dtypes.as_dtype(tensor.dtype) UpperCamelCase__: int = tf.get_variable(dtype=A_ ,shape=tensor.shape ,name=A_ ,initializer=tf.zeros_initializer()) session.run(tf.variables_initializer([tf_var])) session.run(A_) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCamelCase__: List[Any] = to_tf_var_name(A_) UpperCamelCase__: List[str] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose): UpperCamelCase__: List[Any] = torch_tensor.T UpperCamelCase__: int = create_tf_var(tensor=A_ ,name=A_ ,session=A_) tf.keras.backend.set_value(A_ ,A_) UpperCamelCase__: Optional[Any] = session.run(A_) print(F"Successfully created {tf_name}: {np.allclose(A_ ,A_)}") UpperCamelCase__: Tuple = tf.train.Saver(tf.trainable_variables()) saver.save(A_ ,os.path.join(A_ ,model_name.replace("-" ,"_") + ".ckpt")) def lowerCAmelCase_ ( A_=None): UpperCamelCase__: Tuple = argparse.ArgumentParser() parser.add_argument("--model_name" ,type=A_ ,required=A_ ,help="model name e.g. bert-base-uncased") parser.add_argument( "--cache_dir" ,type=A_ ,default=A_ ,required=A_ ,help="Directory containing pytorch model") parser.add_argument("--pytorch_model_path" ,type=A_ ,required=A_ ,help="/path/to/<pytorch-model-name>.bin") parser.add_argument("--tf_cache_dir" ,type=A_ ,required=A_ ,help="Directory in which to save tensorflow model") UpperCamelCase__: Any = parser.parse_args(A_) UpperCamelCase__: List[Any] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path) ,cache_dir=args.cache_dir ,) convert_pytorch_checkpoint_to_tf(model=A_ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name) if __name__ == "__main__": main()
149
1
"""simple docstring""" from bisect import bisect from itertools import accumulate def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_ ) -> str: """simple docstring""" a = sorted(zip(__lowerCamelCase, __lowerCamelCase ), key=lambda snake_case_ : x[0] / x[1], reverse=__lowerCamelCase ) a , a = [i[0] for i in r], [i[1] for i in r] a = list(accumulate(__lowerCamelCase ) ) a = bisect(__lowerCamelCase, __lowerCamelCase ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
368
from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str | Literal[False]: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count += 1 a = '''_''' if count > 1: return False else: return "".join(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> list[str]: """simple docstring""" a = [] while True: a = ['''$'''] * len(snake_case_ ) a = [] for i in range(len(snake_case_ ) ): for j in range(i + 1, len(snake_case_ ) ): a = compare_string(binary[i], binary[j] ) if k is False: a = '''*''' a = '''*''' temp.append('''X''' ) for i in range(len(snake_case_ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(snake_case_ ) == 0: return pi a = list(set(snake_case_ ) ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] for minterm in minterms: a = '''''' for _ in range(snake_case_ ): a = str(minterm % 2 ) + string minterm //= 2 temp.append(snake_case_ ) return temp def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> bool: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] a = [0] * len(snake_case_ ) for i in range(len(chart[0] ) ): a = 0 a = -1 for j in range(len(snake_case_ ) ): if chart[j][i] == 1: count += 1 a = j if count == 1: a = 1 for i in range(len(snake_case_ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(snake_case_ ) ): a = 0 temp.append(prime_implicants[i] ) while True: a = 0 a = -1 a = 0 for i in range(len(snake_case_ ) ): a = chart[i].count(1 ) if count_n > max_n: a = count_n a = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(snake_case_ ) ): a = 0 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[list[int]]: """simple docstring""" a = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )] for i in range(len(snake_case_ ) ): a = prime_implicants[i].count('''_''' ) for j in range(len(snake_case_ ) ): if is_for_table(prime_implicants[i], binary[j], snake_case_ ): a = 1 return chart def SCREAMING_SNAKE_CASE__ ( ) -> None: """simple docstring""" a = int(input('''Enter the no. of variables\n''' ) ) a = [ float(snake_case_ ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] a = decimal_to_binary(snake_case_, snake_case_ ) a = check(snake_case_ ) print('''Prime Implicants are:''' ) print(snake_case_ ) a = prime_implicant_chart(snake_case_, snake_case_ ) a = selection(snake_case_, snake_case_ ) print('''Essential Prime Implicants are:''' ) print(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
330
0
'''simple docstring''' def a_ ( __snake_case : list[int] ) -> float: """simple docstring""" if not nums: # Makes sure that the list is not empty raise ValueError('''List is empty''' ) lowerCamelCase_ =sum(__snake_case ) / len(__snake_case ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
75
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Any = logging.get_logger(__name__) def _snake_case ( _snake_case : List[Any] , _snake_case : Tuple=False ): lowerCAmelCase : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCAmelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Tuple=False ): for i in range(config.num_hidden_layers ): if base_model: lowerCAmelCase : Optional[int] = '''''' else: lowerCAmelCase : Union[str, Any] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase : List[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowerCAmelCase : Tuple = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase : Optional[Any] = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase : Tuple = in_proj_bias[: config.hidden_size] lowerCAmelCase : Tuple = 
in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase : List[Any] = in_proj_bias[-config.hidden_size :] def _snake_case ( _snake_case : Tuple ): lowerCAmelCase : List[Any] = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : List[Any] ): lowerCAmelCase : Optional[int] = dct.pop(_snake_case ) lowerCAmelCase : Union[str, Any] = val def _snake_case ( ): lowerCAmelCase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase : Any = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return im @torch.no_grad() def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[Any] ): lowerCAmelCase : Any = ViTConfig() lowerCAmelCase : Any = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": lowerCAmelCase : List[str] = True lowerCAmelCase : int = int(vit_name[-12:-10] ) lowerCAmelCase : List[Any] = int(vit_name[-9:-6] ) else: lowerCAmelCase : str = 1000 lowerCAmelCase : Optional[int] = '''huggingface/label-files''' lowerCAmelCase : Any = '''imagenet-1k-id2label.json''' lowerCAmelCase : Optional[Any] = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase : Optional[Any] = {int(_snake_case ): v for k, v in idalabel.items()} lowerCAmelCase : Dict = idalabel lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} lowerCAmelCase : List[str] = int(vit_name[-6:-4] ) lowerCAmelCase : int = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): lowerCAmelCase : str = 192 lowerCAmelCase : int = 768 lowerCAmelCase : List[str] = 12 lowerCAmelCase : str = 3 elif vit_name[9:].startswith('''small''' ): lowerCAmelCase : List[str] = 384 lowerCAmelCase : Optional[int] = 1536 lowerCAmelCase : int = 12 lowerCAmelCase : str = 6 else: pass else: if vit_name[4:].startswith('''small''' ): lowerCAmelCase : List[str] = 768 lowerCAmelCase : Dict = 2304 lowerCAmelCase : Dict = 8 lowerCAmelCase : Tuple = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): lowerCAmelCase : Union[str, Any] = 1024 lowerCAmelCase : List[Any] = 4096 lowerCAmelCase : Union[str, Any] = 24 lowerCAmelCase : Any = 16 elif vit_name[4:].startswith('''huge''' ): lowerCAmelCase : Any = 1280 lowerCAmelCase : str = 5120 lowerCAmelCase : Tuple = 32 lowerCAmelCase : Tuple = 16 # load original model from timm lowerCAmelCase : Any = timm.create_model(_snake_case , pretrained=_snake_case ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCAmelCase : int = timm_model.state_dict() if base_model: remove_classification_head_(_snake_case ) lowerCAmelCase : Optional[Any] = create_rename_keys(_snake_case , _snake_case ) for src, dest in rename_keys: rename_key(_snake_case , _snake_case , _snake_case ) read_in_q_k_v(_snake_case , _snake_case , _snake_case ) # load HuggingFace model if vit_name[-5:] == "in21k": lowerCAmelCase : Any = ViTModel(_snake_case ).eval() else: lowerCAmelCase : Any = ViTForImageClassification(_snake_case ).eval() model.load_state_dict(_snake_case ) # Check outputs on an image, prepared by 
ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: lowerCAmelCase : Dict = DeiTImageProcessor(size=config.image_size ) else: lowerCAmelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size ) lowerCAmelCase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCAmelCase : Dict = encoding['''pixel_values'''] lowerCAmelCase : List[Any] = model(_snake_case ) if base_model: lowerCAmelCase : Dict = timm_model.forward_features(_snake_case ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_snake_case , outputs.pooler_output , atol=1E-3 ) else: lowerCAmelCase : Dict = timm_model(_snake_case ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_snake_case , outputs.logits , atol=1E-3 ) Path(_snake_case ).mkdir(exist_ok=_snake_case ) print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_patch16_224''', type=str, help='''Name of the ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) snake_case__ : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
60
0
import math import tensorflow as tf from packaging import version def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Any = tf.convert_to_tensor(_lowercase ) UpperCAmelCase_ : Optional[Any] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Any = tf.convert_to_tensor(_lowercase ) UpperCAmelCase_ : Tuple = tf.cast(math.pi , x.dtype ) UpperCAmelCase_ : str = tf.cast(0.04_4715 , x.dtype ) UpperCAmelCase_ : List[Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_lowercase , 3 )) )) return x * cdf def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[str] = tf.convert_to_tensor(_lowercase ) return x * tf.tanh(tf.math.softplus(_lowercase ) ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[str] = tf.convert_to_tensor(_lowercase ) UpperCAmelCase_ : str = tf.cast(0.04_4715 , x.dtype ) UpperCAmelCase_ : Optional[int] = tf.cast(0.79_7884_5608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : str = tf.convert_to_tensor(_lowercase ) UpperCAmelCase_ : List[str] = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' return tf.clip_by_value(_gelu(_lowercase ) , -10 , 10 ) def lowerCamelCase__ ( _lowercase , _lowercase=-1 ): '''simple docstring''' UpperCAmelCase_ : Any = tf.split(_lowercase , 2 , axis=_lowercase ) return a * tf.math.sigmoid(_lowercase ) if version.parse(tf.version.VERSION) >= version.parse('2.4'): def lowerCamelCase__ ( _lowercase ): '''simple docstring''' return tf.keras.activations.gelu(_lowercase , approximate=_lowercase ) __a = tf.keras.activations.gelu __a = approximate_gelu_wrap else: __a = _gelu __a = _gelu_new __a = { 'gelu': gelu, 'gelu_10': gelu_aa, 'gelu_fast': gelu_fast, 'gelu_new': gelu_new, 'glu': glu, 'mish': mish, 'quick_gelu': quick_gelu, 'relu': tf.keras.activations.relu, 'sigmoid': tf.keras.activations.sigmoid, 'silu': tf.keras.activations.swish, 'swish': tf.keras.activations.swish, 'tanh': tf.keras.activations.tanh, } def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
371
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType __a = logging.get_logger(__name__) __a = { 'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json', } class __a( _a ): """simple docstring""" lowerCAmelCase = '''layoutlmv3''' def __init__( self ,_SCREAMING_SNAKE_CASE=50_265 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=64 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=224 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Dict: super().__init__( vocab_size=_SCREAMING_SNAKE_CASE ,hidden_size=_SCREAMING_SNAKE_CASE ,num_hidden_layers=_SCREAMING_SNAKE_CASE ,num_attention_heads=_SCREAMING_SNAKE_CASE ,intermediate_size=_SCREAMING_SNAKE_CASE ,hidden_act=_SCREAMING_SNAKE_CASE ,hidden_dropout_prob=_SCREAMING_SNAKE_CASE ,attention_probs_dropout_prob=_SCREAMING_SNAKE_CASE ,max_position_embeddings=_SCREAMING_SNAKE_CASE ,type_vocab_size=_SCREAMING_SNAKE_CASE ,initializer_range=_SCREAMING_SNAKE_CASE ,layer_norm_eps=_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : Dict = max_ad_position_embeddings UpperCAmelCase_ : Any = coordinate_size UpperCAmelCase_ : Tuple = shape_size UpperCAmelCase_ : Optional[int] = has_relative_attention_bias UpperCAmelCase_ : Union[str, Any] = rel_pos_bins UpperCAmelCase_ : Dict = max_rel_pos UpperCAmelCase_ : Union[str, Any] = has_spatial_attention_bias UpperCAmelCase_ : Any = rel_ad_pos_bins UpperCAmelCase_ : Tuple = max_rel_ad_pos UpperCAmelCase_ : List[str] = text_embed UpperCAmelCase_ : int = visual_embed UpperCAmelCase_ : int = input_size UpperCAmelCase_ : Dict = num_channels UpperCAmelCase_ : int = patch_size UpperCAmelCase_ : Dict = classifier_dropout class __a( _a ): """simple docstring""" lowerCAmelCase = version.parse('''1.12''' ) @property def a__ ( self ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ('''bbox''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) else: return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''bbox''', {0: '''batch''', 1: '''sequence'''}), ('''attention_mask''', {0: '''batch''', 1: 
'''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}), ] ) @property def a__ ( self ) -> float: return 1e-5 @property def a__ ( self ) -> int: return 12 def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 40 ,_SCREAMING_SNAKE_CASE = 40 ,) -> Mapping[str, Any]: setattr(processor.image_processor ,'''apply_ocr''' ,_SCREAMING_SNAKE_CASE ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX UpperCAmelCase_ : List[str] = compute_effective_axis_dimension( _SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase_ : Optional[Any] = processor.tokenizer.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = compute_effective_axis_dimension( _SCREAMING_SNAKE_CASE ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_SCREAMING_SNAKE_CASE ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase_ : Optional[Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes UpperCAmelCase_ : Tuple = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) UpperCAmelCase_ : Union[str, Any] = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = dict( processor( _SCREAMING_SNAKE_CASE ,text=_SCREAMING_SNAKE_CASE ,boxes=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,) ) return inputs
235
0
from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]: __lowerCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) # We need to create solution object to save path. __lowerCamelCase : Tuple = [[0 for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )] __lowerCamelCase : Optional[Any] = run_maze(SCREAMING_SNAKE_CASE__ , 0 , 0 , SCREAMING_SNAKE_CASE__ ) if solved: print('\n'.join(str(SCREAMING_SNAKE_CASE__ ) for row in solutions ) ) else: print('No solution exists!' ) return solved def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: __lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE__ ) # Final check point. if i == j == (size - 1): __lowerCamelCase : Any = 1 return True __lowerCamelCase : int = (not i < 0) and (not j < 0) # Check lower bounds __lowerCamelCase : Any = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. __lowerCamelCase : List[str] = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited __lowerCamelCase : List[str] = 1 # check for directions if ( run_maze(SCREAMING_SNAKE_CASE__ , i + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j + 1 , SCREAMING_SNAKE_CASE__ ) or run_maze(SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - 1 , SCREAMING_SNAKE_CASE__ ) ): return True __lowerCamelCase : Any = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
73
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ["note_seq"] def __init__( self : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[int] ) ->Any: requires_backends(self , ['''note_seq'''] ) @classmethod def snake_case__( cls : int , *_UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) ->int: requires_backends(cls , ['''note_seq'''] ) @classmethod def snake_case__( cls : Dict , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) ->List[str]: requires_backends(cls , ['''note_seq'''] )
8
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' A : Optional[Any] = 'Salesforce/blip-image-captioning-base' A : str = ( 'This is a tool that generates a description of an image. It takes an input named `image` which should be the ' 'image to caption, and returns a text that contains the description in English.' ) A : List[Any] = 'image_captioner' A : int = AutoModelForVisionaSeq A : Union[str, Any] = ['image'] A : Optional[int] = ['text'] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: requires_backends(self , ["vision"] ) super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any: return self.pre_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ) def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Tuple: return self.model.generate(**_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: return self.pre_processor.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )[0].strip()
362
def lowerCAmelCase__ ( _a : int = 50 ): snake_case_ : Union[str, Any] = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"""{solution() = }""")
36
0
import operator as op def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple ) -> int: """simple docstring""" UpperCamelCase :Dict = [] UpperCamelCase :Union[str, Any] = lambda __magic_name__ , __magic_name__ : int(x / y ) # noqa: E731 integer division operation UpperCamelCase :Optional[Any] = { """^""": op.pow, """*""": op.mul, """/""": div, """+""": op.add, """-""": op.sub, } # operators & their respective operation # print table header print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ ) print("""-""" * (30 + len(__magic_name__ )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(__magic_name__ ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(__magic_name__ ) , sep=""" | """ ) else: UpperCamelCase :List[Any] = stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(__magic_name__ ) , sep=""" | """ ) UpperCamelCase :Union[str, Any] = stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(__magic_name__ ) , sep=""" | """ ) stack.append( str(opr[x](int(__magic_name__ ) , int(__magic_name__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(__magic_name__ ) , sep=""" | """ , ) return int(stack[0] ) if __name__ == "__main__": UpperCAmelCase_ : Tuple = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''') print('''\n\tResult = ''', solve(Postfix))
38
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup UpperCAmelCase_ : Any = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( _a ): def __init__( self : Optional[int] , **__lowerCamelCase : Optional[int] ): requires_backends(self , ["""bs4"""] ) super().__init__(**__lowerCamelCase ) def _A ( self : List[str] , __lowerCamelCase : Any ): UpperCamelCase :Optional[int] = [] UpperCamelCase :List[str] = [] UpperCamelCase :Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag UpperCamelCase :Optional[Any] = parent.find_all(child.name , recursive=__lowerCamelCase ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__lowerCamelCase ) else next(i for i, s in enumerate(__lowerCamelCase , 1 ) if s is child ) ) UpperCamelCase :Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _A ( self : Any , __lowerCamelCase : Tuple ): UpperCamelCase :Any = BeautifulSoup(__lowerCamelCase , """html.parser""" ) UpperCamelCase :Union[str, Any] = [] UpperCamelCase :Tuple = [] UpperCamelCase :Tuple = [] for element in html_code.descendants: if type(__lowerCamelCase ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue UpperCamelCase :Any = html.unescape(__lowerCamelCase ).strip() if not text_in_this_tag: continue all_doc_strings.append(__lowerCamelCase ) UpperCamelCase , UpperCamelCase :Optional[Any] = self.xpath_soup(__lowerCamelCase ) stringaxtag_seq.append(__lowerCamelCase ) stringaxsubs_seq.append(__lowerCamelCase ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError("""Number of doc strings and xtags does not correspond""" ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError("""Number of doc strings and xsubs does not correspond""" ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _A ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ): UpperCamelCase :Tuple = """""" for tagname, subs in zip(__lowerCamelCase , __lowerCamelCase ): xpath += F"""/{tagname}""" if subs != 0: xpath += F"""[{subs}]""" return xpath def __call__( self : Any , __lowerCamelCase : Dict ): UpperCamelCase :Any = False # Check that strings has a valid type if isinstance(__lowerCamelCase , __lowerCamelCase ): UpperCamelCase :List[Any] = True elif isinstance(__lowerCamelCase , (list, tuple) ): if len(__lowerCamelCase ) == 0 or isinstance(html_strings[0] , __lowerCamelCase ): UpperCamelCase :Any = True if not valid_strings: raise ValueError( """HTML strings must of type `str`, `List[str]` (batch of examples), """ F"""but is of type {type(__lowerCamelCase )}.""" ) UpperCamelCase :str = bool(isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __lowerCamelCase )) ) if not is_batched: UpperCamelCase :Any = [html_strings] # Get nodes + xpaths UpperCamelCase :Union[str, Any] = [] UpperCamelCase :str = [] for html_string in html_strings: UpperCamelCase , UpperCamelCase , UpperCamelCase :int = self.get_three_from_single(__lowerCamelCase ) nodes.append(__lowerCamelCase ) UpperCamelCase :int = [] for node, tag_list, sub_list in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): UpperCamelCase :str = self.construct_xpath(__lowerCamelCase , __lowerCamelCase ) xpath_strings.append(__lowerCamelCase ) 
xpaths.append(__lowerCamelCase ) # return as Dict UpperCamelCase :Optional[int] = {"""nodes""": nodes, """xpaths""": xpaths} UpperCamelCase :Any = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) return encoded_inputs
38
1
"""simple docstring""" def _snake_case ( UpperCamelCase : int = 3 , UpperCamelCase : int = 7 , UpperCamelCase : int = 1000000 ): UpperCAmelCase : List[str] = 0 UpperCAmelCase : List[str] = 1 for current_denominator in range(1 , limit + 1 ): UpperCAmelCase : Tuple = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: UpperCAmelCase : List[str] = current_numerator UpperCAmelCase : Dict = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
76
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: A: str = None A: List[Any] = logging.get_logger(__name__) A: Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} A: Union[str, Any] = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } A: Tuple = { "facebook/mbart-large-en-ro": 1_0_2_4, "facebook/mbart-large-cc25": 1_0_2_4, } # fmt: off A: Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Tuple = VOCAB_FILES_NAMES __lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Tuple = ['input_ids', 'attention_mask'] __lowerCAmelCase : str = MBartTokenizer __lowerCAmelCase : List[int] = [] __lowerCAmelCase : List[int] = [] def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Any: '''simple docstring''' UpperCAmelCase : Union[str, Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token super().__init__( vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) UpperCAmelCase : int = vocab_file UpperCAmelCase : Optional[int] = False if not self.vocab_file else True UpperCAmelCase : List[str] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) UpperCAmelCase : List[Any] = { lang_code: self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase : int = src_lang if src_lang is not None else """en_XX""" UpperCAmelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' UpperCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]: '''simple docstring''' UpperCAmelCase : str = [self.sep_token_id] UpperCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) UpperCAmelCase : List[str] = src_lang UpperCAmelCase : Union[str, Any] = self(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "en_XX" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "ro_RO" , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding: '''simple docstring''' UpperCAmelCase : int = src_lang UpperCAmelCase : Dict = tgt_lang return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' UpperCAmelCase : Any = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = [] UpperCAmelCase : Tuple = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , 
self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None: '''simple docstring''' UpperCAmelCase : Tuple = self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = [] UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase : str = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase : int = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): logger.error(F"Vocabulary path ({save_directory}) should be a directory." ) return UpperCAmelCase : Any = os.path.join( _SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
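# A minimal usage sketch for the fast MBart tokenizer above. It is a sketch
# under the assumption that `transformers` is installed and that the
# "facebook/mbart-large-en-ro" checkpoint listed in the vocab map is
# reachable; the example sentences are illustrative.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)

# Source text gets the suffix [eos, src_lang_code] via set_src_lang_special_tokens.
inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")

# Target text is tokenized under the tgt_lang special tokens instead.
with tokenizer.as_target_tokenizer():
    labels = tokenizer("Şeful ONU declară că nu există o soluţie militară în Siria", return_tensors="pt")

print(inputs["input_ids"][0][-2:])  # [..., eos_token_id, en_XX language-code id]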
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
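# A quick sketch of the shim's observable behavior: constructing the
# deprecated class still yields a working image processor but emits a
# FutureWarning. This assumes `transformers` still exports
# PerceiverFeatureExtractor and that its defaults need no arguments.
import warnings

from transformers import PerceiverFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    feature_extractor = PerceiverFeatureExtractor()

assert any(issubclass(w.category, FutureWarning) for w in caught)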
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
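# A minimal sketch of instantiating the config defined above; it uses only the
# defaults from __init__ plus two illustrative overrides, so no checkpoint or
# network access is needed.
config = Speech2Text2Config(d_model=512, decoder_layers=12)

# attribute_map lets generic code read decoder-style configs uniformly:
# `hidden_size` resolves to `d_model` and `num_attention_heads` to
# `decoder_attention_heads`.
assert config.hidden_size == 512
assert config.num_attention_heads == config.decoder_attention_heads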
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _UpperCamelCase: Optional[int] = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase: Dict = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase: int = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys _UpperCamelCase: Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging _UpperCamelCase: Optional[int] = logging.get_logger(__name__) _UpperCamelCase: Union[str, Any] = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class a__ ( SCREAMING_SNAKE_CASE__ ): _lowerCamelCase = 'gpt_neo' _lowerCamelCase = ['past_key_values'] _lowerCamelCase = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : Optional[Any], lowerCAmelCase : int=50257, lowerCAmelCase : Tuple=2048, lowerCAmelCase : int=2048, lowerCAmelCase : Tuple=24, lowerCAmelCase : Optional[Any]=[[["global", "local"], 12]], lowerCAmelCase : Optional[int]=16, lowerCAmelCase : Optional[Any]=None, lowerCAmelCase : Dict=256, lowerCAmelCase : Optional[int]="gelu_new", lowerCAmelCase : Any=0.0, lowerCAmelCase : Dict=0.0, lowerCAmelCase : Optional[Any]=0.0, lowerCAmelCase : Dict=0.1, lowerCAmelCase : List[Any]=1e-5, lowerCAmelCase : Optional[Any]=0.02, lowerCAmelCase : Dict=True, lowerCAmelCase : int=50256, lowerCAmelCase : Optional[Any]=50256, **lowerCAmelCase : Any, ) -> Optional[Any]: lowercase : List[Any] = vocab_size lowercase : Optional[Any] = max_position_embeddings lowercase : Dict = hidden_size lowercase : Optional[Any] = num_layers lowercase : str = num_heads lowercase : Optional[int] = intermediate_size lowercase : List[str] = window_size lowercase : Dict = activation_function lowercase : Dict = resid_dropout lowercase : int = embed_dropout lowercase : Optional[Any] = attention_dropout lowercase : Tuple = classifier_dropout lowercase : Optional[int] = layer_norm_epsilon lowercase : Dict = initializer_range lowercase : Optional[Any] = use_cache lowercase : Union[str, Any] = bos_token_id lowercase : int = eos_token_id lowercase : str = attention_types lowercase : int = self.expand_attention_types_params(lowerCAmelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase ) @staticmethod def lowercase ( lowerCAmelCase : str ) -> Optional[Any]: lowercase : Dict = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int: '''simple docstring''' import torch lowercase : Dict = input.size() lowercase : Optional[int] = len(_UpperCAmelCase ) lowercase : str = shape[dimension] lowercase : Optional[Any] = torch.arange(0 , _UpperCAmelCase , _UpperCAmelCase ) lowercase : List[str] = torch.div(sizedim - size , _UpperCAmelCase , rounding_mode='floor' ) + 1 lowercase : Any = torch.arange(_UpperCAmelCase ) + low_indices[:min_length][:, None] lowercase : List[Any] = [slice(_UpperCAmelCase )] * rank lowercase : int = indices lowercase : Optional[Any] = input[s] lowercase : str = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(_UpperCAmelCase ) def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Any: '''simple docstring''' import torch lowercase : int = torch.arange(1 , _UpperCAmelCase ) lowercase : List[str] = torch.remainder(_UpperCAmelCase , _UpperCAmelCase ) lowercase : Optional[int] = remainders == 0 lowercase : Tuple = candidates[divisor_indices] lowercase : Any = torch.max(_UpperCAmelCase ) return largest_divisor, torch.div(_UpperCAmelCase , _UpperCAmelCase , rounding_mode='floor' ) class a__ ( SCREAMING_SNAKE_CASE__ ): @property def lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]: lowercase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase, direction='inputs' ) lowercase : Dict = {0: 'batch', 1: 'past_sequence + sequence'} else: lowercase : List[str] = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowercase ( self : int ) -> int: return self._config.num_heads def lowercase ( self : Tuple, lowerCAmelCase : PreTrainedTokenizer, lowerCAmelCase : int = -1, lowerCAmelCase : int = -1, lowerCAmelCase : bool = False, lowerCAmelCase : Optional[TensorType] = None, ) -> Mapping[str, Any]: lowercase : Union[str, Any] = super(lowerCAmelCase, self ).generate_dummy_inputs( lowerCAmelCase, batch_size=lowerCAmelCase, seq_length=lowerCAmelCase, is_pair=lowerCAmelCase, framework=lowerCAmelCase ) # We need to order the input in the way they appears in the forward() lowercase : int = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch lowercase , lowercase : str = common_inputs['input_ids'].shape # Not using the same length for past_key_values lowercase : Tuple = seqlen + 2 lowercase : Tuple = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase : Any = [ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers ) ] lowercase : Optional[int] = common_inputs['attention_mask'] if self.use_past: lowercase : Optional[int] = ordered_inputs['attention_mask'].dtype lowercase : Dict = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase, lowerCAmelCase, dtype=lowerCAmelCase )], dim=1 ) return ordered_inputs @property def lowercase ( self : int ) -> int: return 13
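# A standalone check of how the compact `attention_types` spec above unrolls
# into one attention flavor per layer; the expander logic from the config
# class is re-inlined here so the sketch runs without transformers installed.
def expand_attention_types_params(attention_types):
    attentions = []
    for item in attention_types:
        for _ in range(item[1]):
            attentions.extend(item[0])
    return attentions


layers = expand_attention_types_params([[["global", "local"], 12]])  # the default spec
assert layers == ["global", "local"] * 12
assert len(layers) == 24  # must equal num_layers, or the config raises ValueError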
'''simple docstring''' from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch __a = logging.get_logger(__name__) @add_end_docstrings( _a , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , ) class UpperCAmelCase_ ( _a ): """simple docstring""" def lowerCamelCase ( self : Union[str, Any] , snake_case_ : GenericTensor ): if self.framework == "tf": snake_case__ : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": snake_case__ : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ) else: raise ValueError("""Unsupported framework""" ) return masked_index def lowerCamelCase ( self : Optional[Any] , snake_case_ : GenericTensor ): snake_case__ : List[Any] = self.get_masked_index(snake_case_ ) snake_case__ : List[Any] = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , ) def lowerCamelCase ( self : Tuple , snake_case_ : GenericTensor ): if isinstance(snake_case_ , snake_case_ ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(snake_case_ ) def lowerCamelCase ( self : List[Any] , snake_case_ : Any , snake_case_ : Optional[int]=None , **snake_case_ : Optional[Any] ): if return_tensors is None: snake_case__ : Tuple = self.framework snake_case__ : Optional[Any] = self.tokenizer(snake_case_ , return_tensors=snake_case_ ) self.ensure_exactly_one_mask_token(snake_case_ ) return model_inputs def lowerCamelCase ( self : str , snake_case_ : str ): snake_case__ : Union[str, Any] = self.model(**snake_case_ ) snake_case__ : Dict = model_inputs["""input_ids"""] return model_outputs def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : str=5 , snake_case_ : List[Any]=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: snake_case__ : Any = target_ids.shape[0] snake_case__ : List[Any] = model_outputs["""input_ids"""][0] snake_case__ : Optional[Any] = model_outputs["""logits"""] if self.framework == "tf": snake_case__ : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] snake_case__ : Optional[int] = outputs.numpy() snake_case__ : Optional[int] = outputs[0, masked_index, :] snake_case__ : Optional[int] = stable_softmax(snake_case_ , axis=-1 ) if target_ids is not None: snake_case__ : Optional[Any] = tf.gather_nd(tf.squeeze(snake_case_ , 0 ) , target_ids.reshape(-1 , 1 ) ) snake_case__ : Optional[int] = tf.expand_dims(snake_case_ , 0 ) snake_case__ : int = tf.math.top_k(snake_case_ , k=snake_case_ ) snake_case__ , snake_case__ : Any = topk.values.numpy(), topk.indices.numpy() else: snake_case__ : List[Any] = 
torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample snake_case__ : Tuple = outputs[0, masked_index, :] snake_case__ : Tuple = logits.softmax(dim=-1 ) if target_ids is not None: snake_case__ : List[str] = probs[..., target_ids] snake_case__ , snake_case__ : List[str] = probs.topk(snake_case_ ) snake_case__ : Tuple = [] snake_case__ : List[str] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): snake_case__ : Union[str, Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place snake_case__ : Dict = input_ids.numpy().copy() if target_ids is not None: snake_case__ : Any = target_ids[p].tolist() snake_case__ : Union[str, Any] = p # Filter padding out: snake_case__ : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back snake_case__ : str = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) snake_case__ : Union[str, Any] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence} row.append(snake_case_ ) result.append(snake_case_ ) if single_mask: return result[0] return result def lowerCamelCase ( self : int , snake_case_ : Any , snake_case_ : str=None ): if isinstance(snake_case_ , snake_case_ ): snake_case__ : Union[str, Any] = [targets] try: snake_case__ : Any = self.tokenizer.get_vocab() except Exception: snake_case__ : str = {} snake_case__ : List[Any] = [] for target in targets: snake_case__ : List[str] = vocab.get(snake_case_ , snake_case_ ) if id_ is None: snake_case__ : int = self.tokenizer( snake_case_ , add_special_tokens=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , max_length=1 , truncation=snake_case_ , )["""input_ids"""] if len(snake_case_ ) == 0: logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. " """We cannot replace it with anything meaningful, ignoring it""" ) continue snake_case__ : Optional[Any] = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. " f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." 
) target_ids.append(id_ ) snake_case__ : Optional[Any] = list(set(snake_case_ ) ) if len(snake_case_ ) == 0: raise ValueError("""At least one target must be provided when passed.""" ) snake_case__ : Dict = np.array(snake_case_ ) return target_ids def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Tuple=None , snake_case_ : Union[str, Any]=None ): snake_case__ : Union[str, Any] = {} if targets is not None: snake_case__ : List[str] = self.get_target_ids(snake_case_ , snake_case_ ) snake_case__ : Union[str, Any] = target_ids if top_k is not None: snake_case__ : Optional[int] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" ) return {}, {}, postprocess_params def __call__( self : List[str] , snake_case_ : Union[str, Any] , *snake_case_ : Tuple , **snake_case_ : List[Any] ): snake_case__ : Optional[int] = super().__call__(snake_case_ , **snake_case_ ) if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) == 1: return outputs[0] return outputs
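# A minimal usage sketch of the fill-mask pipeline implemented above. It
# assumes `transformers` is installed and the "distilbert-base-uncased"
# checkpoint is reachable; the model name and sentences are illustrative.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")

# top_k caps the number of candidates returned per mask.
print(fill_mask("The capital of France is [MASK].", top_k=3))

# `targets` restricts scoring to the given tokens (see get_target_ids above);
# out-of-vocabulary targets are re-tokenized with a warning.
print(fill_mask("The capital of France is [MASK].", targets=["paris", "london"]))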
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase__ = logging.get_logger(__name__) # General docstring UpperCamelCase__ = 'RegNetConfig' # Base docstring UpperCamelCase__ = 'facebook/regnet-y-040' UpperCamelCase__ = [1, 1_0_8_8, 7, 7] # Image classification docstring UpperCamelCase__ = 'facebook/regnet-y-040' UpperCamelCase__ = 'tabby, tabby cat' UpperCamelCase__ = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( nn.Module ): def __init__(self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[str] = "relu" , ) -> Any: """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Convad( __UpperCAmelCase , __UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=kernel_size // 2 , groups=__UpperCAmelCase , bias=__UpperCAmelCase , ) UpperCAmelCase__ = nn.BatchNormad(__UpperCAmelCase ) UpperCAmelCase__ = ACTaFN[activation] if activation is not None else nn.Identity() def lowercase_ (self : str , __UpperCAmelCase : int ) -> Tuple: """simple docstring""" UpperCAmelCase__ = self.convolution(__UpperCAmelCase ) UpperCAmelCase__ = self.normalization(__UpperCAmelCase ) UpperCAmelCase__ = self.activation(__UpperCAmelCase ) return hidden_state class A ( nn.Module ): def __init__(self : Dict , __UpperCAmelCase : RegNetConfig ) -> Optional[int]: """simple docstring""" super().__init__() UpperCAmelCase__ = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) UpperCAmelCase__ = config.num_channels def lowercase_ (self : List[Any] , __UpperCAmelCase : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase__ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) UpperCAmelCase__ = self.embedder(__UpperCAmelCase ) return hidden_state class A ( nn.Module ): def __init__(self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 2 ) -> List[str]: """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , stride=__UpperCAmelCase , bias=__UpperCAmelCase ) UpperCAmelCase__ = nn.BatchNormad(__UpperCAmelCase ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : Tensor ) -> Tensor: """simple docstring""" UpperCAmelCase__ = self.convolution(__UpperCAmelCase ) UpperCAmelCase__ = self.normalization(__UpperCAmelCase ) return hidden_state class A ( nn.Module ): def __init__(self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Tuple: """simple docstring""" super().__init__() UpperCAmelCase__ = nn.AdaptiveAvgPoolad((1, 1) ) UpperCAmelCase__ = nn.Sequential( nn.Convad(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , ) def lowercase_ (self : int , __UpperCAmelCase : Optional[int] ) -> str: """simple docstring""" UpperCAmelCase__ = self.pooler(__UpperCAmelCase ) UpperCAmelCase__ = self.attention(__UpperCAmelCase ) UpperCAmelCase__ = hidden_state * attention return hidden_state class A ( nn.Module ): def __init__(self : str , __UpperCAmelCase : RegNetConfig , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 1 ) -> Optional[Any]: """simple docstring""" super().__init__() UpperCAmelCase__ = in_channels != out_channels or stride != 1 UpperCAmelCase__ = max(1 , out_channels // config.groups_width ) UpperCAmelCase__ = ( RegNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase__ = nn.Sequential( RegNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase , groups=__UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , activation=__UpperCAmelCase ) , ) UpperCAmelCase__ = ACTaFN[config.hidden_act] def lowercase_ (self : Optional[int] , __UpperCAmelCase : Optional[int] ) -> int: """simple docstring""" UpperCAmelCase__ = hidden_state UpperCAmelCase__ = self.layer(__UpperCAmelCase ) UpperCAmelCase__ = self.shortcut(__UpperCAmelCase ) hidden_state += residual UpperCAmelCase__ = self.activation(__UpperCAmelCase ) return hidden_state class A ( nn.Module ): def __init__(self : str , __UpperCAmelCase : RegNetConfig , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 1 ) -> Optional[int]: """simple docstring""" super().__init__() UpperCAmelCase__ = in_channels != out_channels or stride != 1 UpperCAmelCase__ = max(1 , out_channels // config.groups_width ) UpperCAmelCase__ = ( RegNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase__ = nn.Sequential( RegNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase , groups=__UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(__UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , 
activation=__UpperCAmelCase ) , ) UpperCAmelCase__ = ACTaFN[config.hidden_act] def lowercase_ (self : List[str] , __UpperCAmelCase : int ) -> str: """simple docstring""" UpperCAmelCase__ = hidden_state UpperCAmelCase__ = self.layer(__UpperCAmelCase ) UpperCAmelCase__ = self.shortcut(__UpperCAmelCase ) hidden_state += residual UpperCAmelCase__ = self.activation(__UpperCAmelCase ) return hidden_state class A ( nn.Module ): def __init__(self : Tuple , __UpperCAmelCase : RegNetConfig , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , ) -> Any: """simple docstring""" super().__init__() UpperCAmelCase__ = RegNetXLayer if config.layer_type == "x" else RegNetYLayer UpperCAmelCase__ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase , ) , *[layer(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for _ in range(depth - 1 )] , ) def lowercase_ (self : Tuple , __UpperCAmelCase : Optional[int] ) -> Tuple: """simple docstring""" UpperCAmelCase__ = self.layers(__UpperCAmelCase ) return hidden_state class A ( nn.Module ): def __init__(self : str , __UpperCAmelCase : RegNetConfig ) -> Tuple: """simple docstring""" super().__init__() UpperCAmelCase__ = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( __UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) UpperCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(__UpperCAmelCase , config.depths[1:] ): self.stages.append(RegNetStage(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , depth=__UpperCAmelCase ) ) def lowercase_ (self : List[Any] , __UpperCAmelCase : Tensor , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention: """simple docstring""" UpperCAmelCase__ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCAmelCase__ = hidden_states + (hidden_state,) UpperCAmelCase__ = stage_module(__UpperCAmelCase ) if output_hidden_states: UpperCAmelCase__ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__UpperCAmelCase , hidden_states=__UpperCAmelCase ) class A ( UpperCAmelCase_ ): __UpperCAmelCase : Union[str, Any] = RegNetConfig __UpperCAmelCase : Dict = 'regnet' __UpperCAmelCase : List[Any] = 'pixel_values' __UpperCAmelCase : Tuple = True def lowercase_ (self : Dict , __UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" if isinstance(__UpperCAmelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(__UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowercase_ (self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : str=False ) -> int: """simple docstring""" if isinstance(__UpperCAmelCase , __UpperCAmelCase ): UpperCAmelCase__ = value UpperCamelCase__ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' UpperCamelCase__ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase_ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class A ( UpperCAmelCase_ ): def __init__(self : Union[str, Any] , __UpperCAmelCase : List[str] ) -> List[str]: """simple docstring""" super().__init__(__UpperCAmelCase ) UpperCAmelCase__ = config UpperCAmelCase__ = RegNetEmbeddings(__UpperCAmelCase ) UpperCAmelCase__ = RegNetEncoder(__UpperCAmelCase ) UpperCAmelCase__ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowercase_ (self : Dict , __UpperCAmelCase : Tensor , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" UpperCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase__ = self.embedder(__UpperCAmelCase ) UpperCAmelCase__ = self.encoder( __UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase ) UpperCAmelCase__ = encoder_outputs[0] UpperCAmelCase__ = self.pooler(__UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__UpperCAmelCase , pooler_output=__UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , UpperCAmelCase_ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class A ( UpperCAmelCase_ ): def __init__(self : Union[str, Any] , __UpperCAmelCase : str ) -> Tuple: """simple docstring""" super().__init__(__UpperCAmelCase ) UpperCAmelCase__ = config.num_labels UpperCAmelCase__ = RegNetModel(__UpperCAmelCase ) # classification head UpperCAmelCase__ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowercase_ (self : int , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[torch.LongTensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: """simple docstring""" UpperCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase__ = self.regnet(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase ) UpperCAmelCase__ = outputs.pooler_output if return_dict else outputs[1] UpperCAmelCase__ = self.classifier(__UpperCAmelCase ) UpperCAmelCase__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCAmelCase__ = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCAmelCase__ = "single_label_classification" else: UpperCAmelCase__ = "multi_label_classification" if self.config.problem_type == "regression": UpperCAmelCase__ = MSELoss() if self.num_labels == 1: UpperCAmelCase__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCAmelCase__ = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": UpperCAmelCase__ = CrossEntropyLoss() UpperCAmelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCAmelCase__ = BCEWithLogitsLoss() UpperCAmelCase__ = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) if not return_dict: UpperCAmelCase__ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states )
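# A minimal inference sketch for the classification model above, under the
# assumption that the obfuscated classes correspond to transformers'
# RegNetForImageClassification and that the "facebook/regnet-y-040"
# checkpoint from the docstrings is reachable; random pixels stand in for a
# preprocessed image.
import torch
from transformers import RegNetForImageClassification

model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
model.eval()

pixel_values = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)

with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits  # ImageClassifierOutputWithNoAttention

print(model.config.id2label[int(logits.argmax(-1))])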
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCamelCase__ = logging.getLogger(__name__) def lowerCAmelCase_ ( ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name", type=__A, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", ) parser.add_argument( "--dataset_config", type=__A, default="wikitext-103-raw-v1", help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path", type=__A, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", ) parser.add_argument( "--shard_size", type=__A, default=1_000, help="Number of entries to go in a single shard.", ) parser.add_argument("--split", type=__A, default="train", choices=["train", "test", "validation"] ) parser.add_argument( "--limit", default=__A, type=__A, help="Limit the number of shards (used for debugging).", ) parser.add_argument( "--max_length", type=__A, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8.", ) parser.add_argument( "--output_dir", default="tf-tpu", type=__A, help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket.", ) UpperCAmelCase__ = parser.parse_args() return args def lowerCAmelCase_ ( __A ) -> Optional[int]: '''simple docstring''' def fn(__A ): return tokenizer(examples["text"] ) return fn def lowerCAmelCase_ ( __A ) -> List[Any]: '''simple docstring''' UpperCAmelCase__ = [] for i in range(len(tokenized_data["input_ids"] ) ): UpperCAmelCase__ = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } UpperCAmelCase__ = tf.train.Features(feature=__A ) UpperCAmelCase__ = tf.train.Example(features=__A ) UpperCAmelCase__ = example.SerializeToString() records.append(__A ) return records def lowerCAmelCase_ ( __A ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split ) if args.limit is not None: UpperCAmelCase__ = min(len(__A ), args.limit ) UpperCAmelCase__ = dataset.select(range(__A ) ) print(f"""Limiting the dataset to {args.limit} entries.""" ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) UpperCAmelCase__ = os.path.join(args.output_dir, args.split ) if not os.path.exists(__A ): os.makedirs(__A ) else: UpperCAmelCase__ = os.path.join(args.output_dir, args.split ) # Tokenize the whole dataset at once. UpperCAmelCase__ = tokenize_function(__A ) UpperCAmelCase__ = dataset.map(__A, batched=__A, num_proc=4, remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. 
To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(__A ): # Concatenate all texts. UpperCAmelCase__ = {k: sum(examples[k], [] ) for k in examples.keys()} UpperCAmelCase__ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 UpperCAmelCase__ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. UpperCAmelCase__ = { k: [t[i : i + args.max_length] for i in range(0, __A, args.max_length )] for k, t in concatenated_examples.items() } return result UpperCAmelCase__ = dataset_tokenized.map(__A, batched=__A, batch_size=1_000, num_proc=4 ) UpperCAmelCase__ = 0 UpperCAmelCase__ = 0 for shard in range(0, len(__A ), args.shard_size ): UpperCAmelCase__ = grouped_dataset[shard : shard + args.shard_size] UpperCAmelCase__ = len(dataset_snapshot["input_ids"] ) UpperCAmelCase__ = os.path.join(__A, f"""dataset-{shard_count}-{records_containing}.tfrecord""" ) UpperCAmelCase__ = get_serialized_examples(__A ) with tf.io.TFRecordWriter(__A ) as out_file: for i in range(len(__A ) ): UpperCAmelCase__ = serialized_examples[i] out_file.write(__A ) print("Wrote file {} containing {} records".format(__A, __A ) ) shard_count += 1 total_records += records_containing with open(f"""split-{args.split}-records-count.txt""", "w" ) as f: print(f"""Total {args.split} records: {total_records}""", file=__A ) if __name__ == "__main__": UpperCamelCase__ = parse_args() main(args)
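# A toy run of the `group_texts` chunking logic above, inlined with a small
# max_length so the behavior is visible without TensorFlow or a real dataset;
# the numbers are illustrative.
def group_texts_toy(examples, max_length=4):
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = (len(concatenated["input_ids"]) // max_length) * max_length
    return {
        k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
        for k, t in concatenated.items()
    }


batch = {"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8, 9]], "attention_mask": [[1, 1, 1], [1, 1, 1, 1], [1, 1]]}
print(group_texts_toy(batch))
# {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]], 'attention_mask': [[1, 1, 1, 1], [1, 1, 1, 1]]}
# The trailing remainder ([9]) is dropped, exactly as in the script.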
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """
    Checks whether `next_ver` can be appended to the path at position `curr_ind`.
    1. The vertex must be connected to the previous vertex in the path.
    2. The vertex must not already appear in the path.
    """
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that the next vertex is not already in the path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive backtracking helper: tries to extend `path` one vertex at a time."""
    if curr_ind == len(graph):
        # Base case: the path visits every vertex; it is a cycle only if the
        # last vertex connects back to the starting vertex.
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive step: try every vertex as the next transition
    for next_ver in range(len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert the current vertex into the path as the next transition
            path[curr_ind] = next_ver
            # Validate the created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """
    Returns a Hamiltonian cycle that starts and ends at `start_index`,
    or an empty list if no such cycle exists.
    """
    path = [-1] * (len(graph) + 1)
    # Initialize the start and end of the path with the starting vertex
    path[0] = path[-1] = start_index
    # Evaluate: return the path if one is found, otherwise an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
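# A quick check of the backtracking search above on a 5-vertex graph that is
# known to contain a Hamiltonian cycle; the adjacency matrix is illustrative.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]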
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCamelCase = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n" UpperCamelCase = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n" UpperCamelCase = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n" UpperCamelCase = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n" UpperCamelCase = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE." @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): """simple docstring""" def a ( self : Optional[int] ) -> Union[str, Any]: return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , ) def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[1, 10, 100] , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3.0 ) -> Union[str, Any]: if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("This metric is currently not supported on Windows." 
) with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE__ ) as executor: lowerCAmelCase__ = [] lowerCAmelCase__ = Counter() lowerCAmelCase__ = 0 lowerCAmelCase__ = defaultdict(SCREAMING_SNAKE_CASE__ ) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ): for candidate in candidates: lowerCAmelCase__ = candidate + "\n" + test_case lowerCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id]) lowerCAmelCase__ = executor.submit(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ) futures.append(SCREAMING_SNAKE_CASE__ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = future.result() results[result["task_id"]].append((result["completion_id"], result) ) lowerCAmelCase__ , lowerCAmelCase__ = [], [] for result in results.values(): result.sort() lowerCAmelCase__ = [r[1]["passed"] for r in result] total.append(len(SCREAMING_SNAKE_CASE__ ) ) correct.append(sum(SCREAMING_SNAKE_CASE__ ) ) lowerCAmelCase__ = np.array(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = np.array(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = k lowerCAmelCase__ = {f'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _A ( lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple ): """simple docstring""" def estimator(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase__ = itertools.repeat(lowerCamelCase__ , len(lowerCamelCase__ ) ) else: assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) lowerCAmelCase__ = iter(lowerCamelCase__ ) return np.array([estimator(int(lowerCamelCase__ ) , int(lowerCamelCase__ ) , lowerCamelCase__ ) for n, c in zip(lowerCamelCase__ , lowerCamelCase__ )] )
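# A standalone check of the unbiased pass@k estimator used above:
# pass@k = 1 - C(n-c, k) / C(n, k), computed as a numerically stable product
# rather than with binomial coefficients; the sample counts are illustrative.
import numpy as np


def pass_at_k(n: int, c: int, k: int) -> float:
    """n = total generated samples, c = correct samples, k = evaluation budget."""
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))


# With 4 samples of which 2 pass: pass@1 = 0.5 and pass@2 = 1 - C(2,2)/C(4,2) = 5/6.
print(pass_at_k(4, 2, 1))  # 0.5
print(pass_at_k(4, 2, 2))  # 0.8333...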
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
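# Usage sketch (not part of the test suite above): a minimal, hedged example of the hook
# API these tests exercise. It only uses names defined in this file, and the asserted
# behavior follows directly from test_post_forward_hook_is_executed.
if __name__ == "__main__":
    model = ModelForTest()
    x = torch.randn(2, 3)
    plain = model(x)  # forward pass without any hook
    add_hook_to_module(model, PostForwardHook())  # the post-forward hook adds 1 to the output
    hooked = model(x)
    assert torch.allclose(hooked, plain + 1, atol=1e-5)
    remove_hook_from_module(model)  # restores the original forward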
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
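# Worked example of the three-way partition above (the input list is illustrative):
# a single in-place pass groups all 0s, then all 1s, then all 2s.
#
#     >>> dutch_national_flag_sort([2, 0, 1, 2, 0, 0, 1])
#     [0, 0, 0, 1, 1, 2, 2]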
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
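# Usage sketch (illustrative, not part of the original module): the default hyperparameters
# follow the microsoft/conditional-detr-resnet-50 checkpoint listed in the archive map
# above, and attribute_map aliases the BERT-style names onto the DETR-style ones.
#
#     >>> config = ConditionalDetrConfig(num_queries=100)  # override one value, keep the rest
#     >>> config.hidden_size  # aliased to d_model via attribute_map
#     256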
from __future__ import annotations

import random
import unittest

from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )


class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict


@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>

        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>

        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
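# Usage sketch (illustrative input, not part of the original module): CANINE tokenizes at
# the character level, so each character maps to its Unicode codepoint, wrapped in the
# [CLS]/[SEP] pseudo-characters defined above.
if __name__ == "__main__":
    tokenizer = CanineTokenizer()
    ids = tokenizer("hello")["input_ids"]
    assert ids == [CLS] + [ord(c) for c in "hello"] + [SEP]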
import torch

from diffusers import StableDiffusionPipeline


model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
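# Usage sketch (hypothetical test, not part of the original fixtures): `temporary_repo`
# guarantees the scratch repo is deleted even if the test body raises. Shown as a comment
# so pytest does not collect it from this fixtures module.
#
#     def test_push_then_cleanup(temporary_repo, hf_api, hf_token):
#         with temporary_repo(f"{CI_HUB_USER}/scratch-{int(time.time() * 10e3)}") as repo_id:
#             hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)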
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
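# Worked example beyond main() above (the prices list is illustrative): the recurrence is
# max_rev[i] = max over j in 1..i of (prices[j - 1] + max_rev[i - j]). With prices
# [1, 5, 8, 9] for rod lengths 1..4, the optimum for n = 4 is 5 + 5 = 10, i.e. two pieces
# of length 2, and all three implementations agree:
#
#     >>> bottom_up_cut_rod(4, [1, 5, 8, 9])
#     10
#     >>> top_down_cut_rod(4, [1, 5, 8, 9])
#     10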
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase : Any = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class lowerCamelCase__ ( A ): """simple docstring""" __a = """deformable_detr""" __a = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : str , UpperCamelCase : int=True , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=300 , UpperCamelCase : List[Any]=1_024 , UpperCamelCase : Tuple=6 , UpperCamelCase : Optional[Any]=1_024 , UpperCamelCase : str=8 , UpperCamelCase : Tuple=6 , UpperCamelCase : Optional[Any]=1_024 , UpperCamelCase : int=8 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Any=True , UpperCamelCase : int="relu" , UpperCamelCase : Tuple=256 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Any=0.02 , UpperCamelCase : Any=1.0 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Dict=False , UpperCamelCase : Tuple="sine" , UpperCamelCase : Union[str, Any]="resnet50" , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[int]=False , UpperCamelCase : int=4 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : int=4 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Union[str, Any]=300 , UpperCamelCase : Dict=False , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=5 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Dict=1 , UpperCamelCase : Any=1 , UpperCamelCase : Optional[int]=5 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : str=0.25 , UpperCamelCase : int=False , **UpperCamelCase : int , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) __UpperCAmelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[str] = backbone_config.get("""model_type""" ) __UpperCAmelCase : Tuple = CONFIG_MAPPING[backbone_model_type] __UpperCAmelCase : str = config_class.from_dict(UpperCamelCase ) __UpperCAmelCase : int = use_timm_backbone __UpperCAmelCase : Dict = backbone_config __UpperCAmelCase : Union[str, Any] = num_channels __UpperCAmelCase : int = num_queries __UpperCAmelCase : Any = max_position_embeddings __UpperCAmelCase : Tuple = d_model __UpperCAmelCase : Any = encoder_ffn_dim __UpperCAmelCase : List[Any] = encoder_layers __UpperCAmelCase : List[str] = encoder_attention_heads __UpperCAmelCase : Tuple = decoder_ffn_dim __UpperCAmelCase : Optional[int] = decoder_layers __UpperCAmelCase : Union[str, Any] = decoder_attention_heads __UpperCAmelCase : List[Any] = dropout __UpperCAmelCase : Optional[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : int = activation_function __UpperCAmelCase : Optional[Any] = init_std __UpperCAmelCase : Any = init_xavier_std __UpperCAmelCase : Optional[Any] = encoder_layerdrop __UpperCAmelCase : Tuple = auxiliary_loss __UpperCAmelCase : Union[str, Any] = position_embedding_type __UpperCAmelCase : List[Any] = backbone __UpperCAmelCase : int = use_pretrained_backbone __UpperCAmelCase : str = dilation # deformable attributes __UpperCAmelCase : Dict = num_feature_levels __UpperCAmelCase : int = encoder_n_points __UpperCAmelCase : Any = decoder_n_points __UpperCAmelCase : Dict = two_stage __UpperCAmelCase : Optional[Any] = two_stage_num_proposals __UpperCAmelCase : Optional[Any] = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher __UpperCAmelCase : Any = class_cost __UpperCAmelCase : str = bbox_cost __UpperCAmelCase : str = giou_cost # Loss coefficients __UpperCAmelCase : str = mask_loss_coefficient __UpperCAmelCase : Optional[int] = dice_loss_coefficient __UpperCAmelCase : List[Any] = bbox_loss_coefficient __UpperCAmelCase : Dict = giou_loss_coefficient __UpperCAmelCase : str = eos_coefficient __UpperCAmelCase : Optional[Any] = focal_alpha __UpperCAmelCase : Optional[Any] = disable_custom_kernels super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return self.encoder_attention_heads @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self.d_model def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __UpperCAmelCase : List[str] = self.backbone_config.to_dict() __UpperCAmelCase : Optional[int] = self.__class__.model_type return output
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __a = { "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST", "FalconForCausalLM", "FalconModel", "FalconPreTrainedModel", "FalconForSequenceClassification", "FalconForTokenClassification", "FalconForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class _lowerCAmelCase ( unittest.TestCase ): def __init__(self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=4 , ): A_ : List[Any] = parent A_ : str = batch_size A_ : List[Any] = seq_length A_ : Dict = is_training A_ : List[Any] = use_attention_mask A_ : Any = use_token_type_ids A_ : Optional[int] = use_labels A_ : Tuple = vocab_size A_ : List[str] = hidden_size A_ : List[str] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : int = intermediate_size A_ : Optional[Any] = hidden_act A_ : List[Any] = hidden_dropout_prob A_ : Optional[Any] = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : Union[str, Any] = type_vocab_size A_ : int = type_sequence_label_size A_ : Any = initializer_range A_ : List[str] = num_choices def _a (self ): A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Any = None if self.use_attention_mask: A_ : Any = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Union[str, Any] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowercase , ) return config, input_ids, attention_mask def _a (self ): A_ : List[str] = self.prepare_config_and_inputs() A_, A_, A_ : str = config_and_inputs A_ : Any = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def _a (self ): A_ : Tuple = FlaxDistilBertModelTester(self ) @slow def _a (self ): for model_class_name in self.all_model_classes: A_ : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" ) A_ : Any = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowercase ) @require_flax class _lowerCAmelCase ( unittest.TestCase ): @slow def _a (self ): A_ : List[str] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) A_ : Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) A_ : int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A_ : Optional[int] = model(lowercase , 
attention_mask=lowercase )[0] A_ : Optional[Any] = (1, 11, 768) self.assertEqual(output.shape , lowercase ) A_ : Union[str, Any] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowercase , atol=1E-4 ) )
"""Project Euler Problem 9: https://projecteuler.net/problem=9

Find the product a * b * c of the unique Pythagorean triplet
(a < b < c) for which a + b + c == 1000.
"""


def solution() -> int:
    """Return a * b * c for the triplet with a + b + c == 1000
    and a**2 + b**2 == c**2, where c = 1000 - a - b."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if a * a + b * b == (1000 - a - b) ** 2
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
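For reference, the unique triplet this search finds is (200, 375, 425); a short assertion-based sanity check (a sketch, assuming it runs in the same module as `solution`):

# Known Euler #9 triplet: 200 + 375 + 425 == 1000.
assert 200**2 + 375**2 == 425**2
assert solution() == 200 * 375 * 425 == 31_875_000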
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) A : str = { '''configuration_blip''': [ '''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlipConfig''', '''BlipTextConfig''', '''BlipVisionConfig''', ], '''processing_blip''': ['''BlipProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = ['''BlipImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = [ '''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlipModel''', '''BlipPreTrainedModel''', '''BlipForConditionalGeneration''', '''BlipForQuestionAnswering''', '''BlipVisionModel''', '''BlipTextModel''', '''BlipForImageTextRetrieval''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = [ '''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBlipModel''', '''TFBlipPreTrainedModel''', '''TFBlipForConditionalGeneration''', '''TFBlipForQuestionAnswering''', '''TFBlipVisionModel''', '''TFBlipTextModel''', '''TFBlipForImageTextRetrieval''', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys A : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import csv

import tweepy

# Twitter API credentials (fill these in before running)
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # Authorize with Twitter and initialize the tweepy API client.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
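tweepy also ships a `Cursor` helper that performs the same `max_id` pagination internally; a hedged sketch of an equivalent fetch (the helper name `get_all_tweets_cursor` is illustrative, not part of the original script):

import tweepy


def get_all_tweets_cursor(api: "tweepy.API", screen_name: str) -> list:
    # Cursor walks the timeline page by page, mirroring the manual loop above.
    return list(tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items())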
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place with insertion sort."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Pick the median of three elements as the partition pivot."""
    if (array[first_index] > array[middle_index]) != (array[first_index] > array[last_index]):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (array[middle_index] > array[last_index]):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """Introsort: quicksort that falls back to heap sort past a depth limit
    and to insertion sort for small partitions."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
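A minimal usage sketch of the introsort entry point above (lists at or below the 16-element threshold fall straight through to insertion sort):

assert sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert sort([]) == []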
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowercase__ = logging.get_logger(__name__) @add_end_docstrings(lowercase ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , *lowercase , **lowercase ): super().__init__(*lowercase , **lowercase ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def A_ ( self , lowercase=None ): _lowerCamelCase : Optional[int] = {} if top_k is not None: _lowerCamelCase : Union[str, Any] = top_k return {}, {}, postprocess_params def __call__( self , lowercase , **lowercase ): return super().__call__(lowercase , **lowercase ) def A_ ( self , lowercase ): _lowerCamelCase : Optional[int] = load_image(lowercase ) _lowerCamelCase : int = self.image_processor(images=lowercase , return_tensors=self.framework ) return model_inputs def A_ ( self , lowercase ): _lowerCamelCase : str = self.model(**lowercase ) return model_outputs def A_ ( self , lowercase , lowercase=5 ): if top_k > self.model.config.num_labels: _lowerCamelCase : Union[str, Any] = self.model.config.num_labels if self.framework == "pt": _lowerCamelCase : Any = model_outputs.logits.softmax(-1 )[0] _lowerCamelCase : int = probs.topk(lowercase ) elif self.framework == "tf": _lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1 )[0] _lowerCamelCase : int = tf.math.top_k(lowercase , k=lowercase ) _lowerCamelCase : Union[str, Any] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) _lowerCamelCase : Any = scores.tolist() _lowerCamelCase : Optional[int] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase , lowercase )]
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( """--original_config_file""", default=None, type=str, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--scheduler_type""", default="""pndm""", type=str, help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""", ) parser.add_argument( """--pipeline_type""", default=None, type=str, help=( """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'""" """. If `None` pipeline will be automatically inferred.""" ), ) parser.add_argument( """--image_size""", default=None, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--prediction_type""", default=None, type=str, help=( """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable""" """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") parser.add_argument( """--stable_unclip""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""", ) parser.add_argument( """--stable_unclip_prior""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""", ) parser.add_argument( """--clip_stats_path""", type=str, help="""Path to the clip stats file. 
Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""", required=False, ) parser.add_argument( """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint.""" ) parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--vae_path""", type=str, default=None, required=False, help="""Set to a path, hub id to an already converted vae to not convert it again.""", ) lowercase__ = parser.parse_args() lowercase__ = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
"""Base64 encoding and decoding, implemented from scratch."""

B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode a bytes-like object to Base64."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decode a Base64 string or bytes-like object back to bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
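A round-trip check of the hand-rolled codec against the standard library (a sketch, assuming it runs in the same module as `base64_encode` / `base64_decode`):

import base64 as stdlib_base64

sample = b"Hello, base64!"
assert base64_encode(sample) == stdlib_base64.b64encode(sample)
assert base64_decode(base64_encode(sample)) == sample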
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
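A minimal usage sketch of the class above:

ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(1, 2) == 5  # 2 + 3
assert ps.contains_sum(7)     # the subarray [3, 4] sums to 7
assert not ps.contains_sum(8)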
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class __lowerCAmelCase : """simple docstring""" @staticmethod def snake_case__ ( *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Any ) -> List[str]: '''simple docstring''' pass def a__ ( lowercase : Image ) -> str: """simple docstring""" _UpperCamelCase = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _snake_case : str = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def snake_case__ ( self : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = DepthEstimationPipeline(model=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> Tuple: '''simple docstring''' _UpperCamelCase = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , lowerCAmelCase__ ) import datasets _UpperCamelCase = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) _UpperCamelCase = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , lowerCAmelCase__ , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def snake_case__ ( self : Optional[int] ) -> int: '''simple docstring''' pass @slow @require_torch def snake_case__ ( self : Optional[Any] ) -> int: '''simple docstring''' _UpperCamelCase = '''Intel/dpt-large''' _UpperCamelCase = pipeline('''depth-estimation''' , model=lowerCAmelCase__ ) _UpperCamelCase = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) _UpperCamelCase = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 ) @require_torch def snake_case__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Transmitted intensity of polarized light through an analyzer:
    I = I0 * cos^2(theta), with theta given in degrees."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")

    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")

    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
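As a worked example, at θ = 60° the cos² factor is 0.25, so a 100-unit beam transmits 25 units (rounding absorbs floating-point error):

assert malus_law(100.0, 0.0) == 100.0
assert round(malus_law(100.0, 60.0), 2) == 25.0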
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Optional[int] ="markuplm" def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=0 , snake_case__=2 , snake_case__=256 , snake_case__=1_024 , snake_case__=216 , snake_case__=1_001 , snake_case__=32 , snake_case__=50 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ): """simple docstring""" super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ , ) lowerCAmelCase : Union[str, Any] = vocab_size lowerCAmelCase : Tuple = hidden_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : Optional[Any] = num_attention_heads lowerCAmelCase : Union[str, Any] = hidden_act lowerCAmelCase : List[Any] = intermediate_size lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : int = attention_probs_dropout_prob lowerCAmelCase : Union[str, Any] = max_position_embeddings lowerCAmelCase : Tuple = type_vocab_size lowerCAmelCase : int = initializer_range lowerCAmelCase : Tuple = layer_norm_eps lowerCAmelCase : str = position_embedding_type lowerCAmelCase : List[str] = use_cache lowerCAmelCase : Optional[Any] = classifier_dropout # additional properties lowerCAmelCase : Tuple = max_depth lowerCAmelCase : Optional[Any] = max_xpath_tag_unit_embeddings lowerCAmelCase : List[str] = max_xpath_subs_unit_embeddings lowerCAmelCase : List[Any] = tag_pad_id lowerCAmelCase : Optional[Any] = subs_pad_id lowerCAmelCase : List[str] = xpath_unit_hidden_size
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
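Pigeonhole sort runs in O(n + k) time, where k is the value range (max - min + 1), so it only pays off when the range is comparable to the list length; a quick in-place check:

data = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(data)  # sorts in place, returns None
assert data == [2, 3, 4, 6, 7, 8, 8]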
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin _A = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class lowercase_ : def __init__( self , __UpperCamelCase , __UpperCamelCase=1_6 , __UpperCamelCase=1_3 , __UpperCamelCase=7 , __UpperCamelCase=1_4 , __UpperCamelCase=1_0 , __UpperCamelCase=1_9 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=True , __UpperCamelCase=1_6 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=[1, 2, 3, 4, 5] , __UpperCamelCase=2_5 , __UpperCamelCase=5 , ): """simple docstring""" UpperCamelCase_ = d_model UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = prediction_length UpperCamelCase_ = context_length UpperCamelCase_ = cardinality UpperCamelCase_ = num_time_features UpperCamelCase_ = lags_sequence UpperCamelCase_ = embedding_dimension UpperCamelCase_ = is_training UpperCamelCase_ = hidden_size UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = hidden_act UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = context_length UpperCamelCase_ = prediction_length + label_length UpperCamelCase_ = label_length UpperCamelCase_ = moving_average UpperCamelCase_ = autocorrelation_factor def lowerCamelCase_ ( self ): """simple docstring""" return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def lowerCamelCase_ ( self , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = config.context_length + max(config.lags_sequence ) UpperCamelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) UpperCamelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) UpperCamelCase_ = floats_tensor([self.batch_size, _past_length] ) UpperCamelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs UpperCamelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) UpperCamelCase_ = floats_tensor([self.batch_size, config.prediction_length] ) UpperCamelCase_ = { """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, 
"""past_observed_mask""": past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return inputs_dict def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.get_config() UpperCamelCase_ = self.prepare_autoformer_inputs_dict(__UpperCamelCase ) return config, inputs_dict def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = AutoformerModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() UpperCamelCase_ = model(**__UpperCamelCase ) UpperCamelCase_ = outputs.encoder_last_hidden_state UpperCamelCase_ = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase_ = model.get_encoder() encoder.save_pretrained(__UpperCamelCase ) UpperCamelCase_ = AutoformerEncoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase ) UpperCamelCase_ = model.create_network_inputs(**__UpperCamelCase ) UpperCamelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) UpperCamelCase_ = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) UpperCamelCase_ = encoder(inputs_embeds=__UpperCamelCase )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) UpperCamelCase_ = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) UpperCamelCase_ = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) UpperCamelCase_ = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) UpperCamelCase_ = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase_ = model.get_decoder() decoder.save_pretrained(__UpperCamelCase ) UpperCamelCase_ = AutoformerDecoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase ) UpperCamelCase_ = decoder( trend=__UpperCamelCase , inputs_embeds=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class lowercase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): A__ : Optional[int] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () A__ : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else () A__ : Optional[int] = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {} A__ : Optional[Any] = False A__ : Optional[Any] = False A__ : int = False A__ : List[str] = False A__ : List[Any] = False A__ : Tuple = False def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = AutoformerModelTester(self ) UpperCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase ) def lowerCamelCase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs() for 
model_class in self.all_model_classes: UpperCamelCase_ = model_class(__UpperCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCamelCase ) UpperCamelCase_ = model_class.from_pretrained(__UpperCamelCase , output_loading_info=__UpperCamelCase ) self.assertEqual(info["""missing_keys"""] , [] ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__UpperCamelCase ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def lowerCamelCase_ ( self ): """simple docstring""" pass def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = inspect.signature(getattr(__UpperCamelCase , """forward""" ) ) # The main input is the name of the argument after `self` UpperCamelCase_ = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , __UpperCamelCase ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ = model_class(__UpperCamelCase ) UpperCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ = [*signature.parameters.keys()] UpperCamelCase_ = [ """past_values""", """past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(__UpperCamelCase )] , __UpperCamelCase ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase_ = True UpperCamelCase_ = getattr(self.model_tester , """seq_length""" , __UpperCamelCase ) UpperCamelCase_ = getattr(self.model_tester , """decoder_seq_length""" , __UpperCamelCase ) UpperCamelCase_ = getattr(self.model_tester , """encoder_seq_length""" , __UpperCamelCase ) UpperCamelCase_ = getattr(self.model_tester , """d_model""" , __UpperCamelCase ) UpperCamelCase_ = getattr(self.model_tester , """num_attention_heads""" , __UpperCamelCase ) UpperCamelCase_ = d_model // num_attention_heads for model_class in self.all_model_classes: UpperCamelCase_ = True UpperCamelCase_ = False UpperCamelCase_ = True UpperCamelCase_ = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) UpperCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCamelCase_ = True UpperCamelCase_ = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) UpperCamelCase_ = outputs.encoder_attentions 
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) UpperCamelCase_ = len(__UpperCamelCase ) UpperCamelCase_ = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(__UpperCamelCase , __UpperCamelCase ) # decoder attentions UpperCamelCase_ = outputs.decoder_attentions self.assertIsInstance(__UpperCamelCase , (list, tuple) ) self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions UpperCamelCase_ = outputs.cross_attentions self.assertIsInstance(__UpperCamelCase , (list, tuple) ) self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine UpperCamelCase_ = True UpperCamelCase_ = True UpperCamelCase_ = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(out_len + 2 , len(__UpperCamelCase ) ) UpperCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def lowerCamelCase_ ( self ): """simple docstring""" super().test_retain_grad_hidden_states_attentions() def lowerCamelCase__ ( a__ : Union[str, Any]="train-batch.pt" ) -> Dict: UpperCamelCase_ = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=A__ , repo_type="""dataset""" ) UpperCamelCase_ = torch.load(A__ , map_location=A__ ) return batch @require_torch @slow class lowercase_ ( unittest.TestCase ): def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase ) UpperCamelCase_ = prepare_batch() with torch.no_grad(): UpperCamelCase_ = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0] UpperCamelCase_ = torch.Size( (6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , __UpperCamelCase ) UpperCamelCase_ = torch.tensor( [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=__UpperCamelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase ) 
UpperCamelCase_ = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): UpperCamelCase_ = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state UpperCamelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , __UpperCamelCase ) UpperCamelCase_ = torch.tensor( [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=__UpperCamelCase ) self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase ) UpperCamelCase_ = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): UpperCamelCase_ = model.generate( static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , ) UpperCamelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , __UpperCamelCase ) UpperCamelCase_ = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=__UpperCamelCase ) UpperCamelCase_ = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __UpperCamelCase , rtol=1e-1 ) )
def pancake_sort(arr: list) -> list:
    """Sort a list with the pancake sort algorithm (prefix reversals)."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, flipping the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first cur elements, flipping the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
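Each pass flips the current maximum to the front and then into its final slot, so the tail of the list fills with sorted values; a quick check:

assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]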
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
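Quick checks of both predicates; the math.sqrt version is exposed to floating-point error for very large inputs, which is what motivates the integer-only binary-search variant:

assert perfect_square(9)
assert perfect_square_binary_search(625)
assert not perfect_square_binary_search(626)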
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = { '''nielsr/canine-s''': 2_048, } # Unicode defines 1,114,112 total “codepoints” _A = 1_114_112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _A = 0 _A = 0xe0_00 _A = 0xe0_01 _A = 0xe0_02 _A = 0xe0_03 _A = 0xe0_04 # Maps special codepoints to human-readable names. _A = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class A ( __UpperCAmelCase ): __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=False, UpperCamelCase__=2048, **UpperCamelCase__, ): """simple docstring""" lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else bos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else eos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else sep_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else cls_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else mask_token super().__init__( bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, sep_token=UpperCamelCase__, cls_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, model_max_length=UpperCamelCase__, **UpperCamelCase__, ) # Creates a mapping for looking up the IDs of special symbols. lowerCAmelCase_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowerCAmelCase_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
lowerCAmelCase_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowerCAmelCase_ = UNICODE_VOCAB_SIZE lowerCAmelCase_ = len(self._special_codepoints ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self._unicode_vocab_size def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return list(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: return ord(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid token: '{token}'" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid id: {index}" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return "".join(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__ ) lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1] if token_ids_a is not None: result += ([0] * len(UpperCamelCase__ )) + [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" return ()
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCAmelCase__ ( _a , _a , _a ): """simple docstring""" a = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 5_0257 , __lowerCamelCase : int = 1024 , __lowerCamelCase : int = 768 , __lowerCamelCase : int = 12 , __lowerCamelCase : int = 12 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : str = "gelu_new" , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 0.1 , __lowerCamelCase : float = 1e-5 , __lowerCamelCase : float = 0.02 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , ) -> Tuple: super().__init__() SCREAMING_SNAKE_CASE__ = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' f''' `n_embd`: {n_embd} are not equal.''' ) SCREAMING_SNAKE_CASE__ = prefix_inner_dim SCREAMING_SNAKE_CASE__ = prefix_hidden_dim SCREAMING_SNAKE_CASE__ = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE__ = ( nn.Linear(self.prefix_hidden_dim , snake_case_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) SCREAMING_SNAKE_CASE__ = GPTaConfig( vocab_size=snake_case_ , n_positions=snake_case_ , n_embd=snake_case_ , n_layer=snake_case_ , n_head=snake_case_ , n_inner=snake_case_ , activation_function=snake_case_ , resid_pdrop=snake_case_ , embd_pdrop=snake_case_ , attn_pdrop=snake_case_ , layer_norm_epsilon=snake_case_ , initializer_range=snake_case_ , scale_attn_weights=snake_case_ , use_cache=snake_case_ , scale_attn_by_inverse_layer_idx=snake_case_ , reorder_and_upcast_attn=snake_case_ , ) SCREAMING_SNAKE_CASE__ = GPTaLMHeadModel(snake_case_ ) def lowercase_ ( self : Optional[int] , __lowerCamelCase : torch.Tensor , __lowerCamelCase : torch.Tensor , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[torch.Tensor] = None , ) -> Dict: SCREAMING_SNAKE_CASE__ = self.transformer.transformer.wte(snake_case_ ) SCREAMING_SNAKE_CASE__ = self.encode_prefix(snake_case_ ) SCREAMING_SNAKE_CASE__ = self.decode_prefix(snake_case_ ) SCREAMING_SNAKE_CASE__ = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: SCREAMING_SNAKE_CASE__ = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) SCREAMING_SNAKE_CASE__ = torch.cat((dummy_token, input_ids) , dim=1 ) SCREAMING_SNAKE_CASE__ = self.transformer(inputs_embeds=snake_case_ , labels=snake_case_ , attention_mask=snake_case_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def lowercase_ ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : torch.device ) -> int: return torch.zeros(snake_case_ , self.prefix_length , dtype=torch.intaa , device=snake_case_ ) def lowercase_ ( self : Tuple , __lowerCamelCase : Tuple ) -> int: return self.encode_prefix(snake_case_ ) @torch.no_grad() def lowercase_ ( self : List[str] , __lowerCamelCase : Optional[int] , 
__lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ) -> Tuple: SCREAMING_SNAKE_CASE__ = torch.split(snake_case_ , 1 , dim=0 ) SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] for feature in features: SCREAMING_SNAKE_CASE__ = self.decode_prefix(feature.to(snake_case_ ) ) # back to the clip feature # Only support beam search for now SCREAMING_SNAKE_CASE__ = self.generate_beam( input_embeds=snake_case_ , device=snake_case_ , eos_token_id=snake_case_ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) SCREAMING_SNAKE_CASE__ = torch.stack(snake_case_ ) SCREAMING_SNAKE_CASE__ = torch.stack(snake_case_ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def lowercase_ ( self : Optional[int] , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : int = 5 , __lowerCamelCase : int = 67 , __lowerCamelCase : float = 1.0 , __lowerCamelCase : Optional[int] = None , ) -> Any: SCREAMING_SNAKE_CASE__ = eos_token_id SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = torch.ones(snake_case_ , device=snake_case_ , dtype=torch.int ) SCREAMING_SNAKE_CASE__ = torch.zeros(snake_case_ , device=snake_case_ , dtype=torch.bool ) if input_embeds is not None: SCREAMING_SNAKE_CASE__ = input_embeds else: SCREAMING_SNAKE_CASE__ = self.transformer.transformer.wte(snake_case_ ) for i in range(snake_case_ ): SCREAMING_SNAKE_CASE__ = self.transformer(inputs_embeds=snake_case_ ) SCREAMING_SNAKE_CASE__ = outputs.logits SCREAMING_SNAKE_CASE__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) SCREAMING_SNAKE_CASE__ = logits.softmax(-1 ).log() if scores is None: SCREAMING_SNAKE_CASE__ = logits.topk(snake_case_ , -1 ) SCREAMING_SNAKE_CASE__ = generated.expand(snake_case_ , *generated.shape[1:] ) SCREAMING_SNAKE_CASE__ = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: SCREAMING_SNAKE_CASE__ = next_tokens else: SCREAMING_SNAKE_CASE__ = tokens.expand(snake_case_ , *tokens.shape[1:] ) SCREAMING_SNAKE_CASE__ = torch.cat((tokens, next_tokens) , dim=1 ) else: SCREAMING_SNAKE_CASE__ = -float(np.inf ) SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = scores[:, None] + logits seq_lengths[~is_stopped] += 1 SCREAMING_SNAKE_CASE__ = scores_sum / seq_lengths[:, None] SCREAMING_SNAKE_CASE__ = scores_sum_average.view(-1 ).topk(snake_case_ , -1 ) SCREAMING_SNAKE_CASE__ = next_tokens // scores_sum.shape[1] SCREAMING_SNAKE_CASE__ = seq_lengths[next_tokens_source] SCREAMING_SNAKE_CASE__ = next_tokens % scores_sum.shape[1] SCREAMING_SNAKE_CASE__ = next_tokens.unsqueeze(1 ) SCREAMING_SNAKE_CASE__ = tokens[next_tokens_source] SCREAMING_SNAKE_CASE__ = torch.cat((tokens, next_tokens) , dim=1 ) SCREAMING_SNAKE_CASE__ = generated[next_tokens_source] SCREAMING_SNAKE_CASE__ = scores_sum_average * seq_lengths SCREAMING_SNAKE_CASE__ = is_stopped[next_tokens_source] SCREAMING_SNAKE_CASE__ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) SCREAMING_SNAKE_CASE__ = torch.cat((generated, next_token_embed) , dim=1 ) SCREAMING_SNAKE_CASE__ = is_stopped + next_tokens.eq(snake_case_ ).squeeze() if is_stopped.all(): break SCREAMING_SNAKE_CASE__ = scores / seq_lengths SCREAMING_SNAKE_CASE__ = scores.argsort(descending=snake_case_ ) # tokens tensors are already padded to max_seq_length SCREAMING_SNAKE_CASE__ = [tokens[i] for i in order] SCREAMING_SNAKE_CASE__ = torch.stack(snake_case_ , dim=0 ) 
SCREAMING_SNAKE_CASE__ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
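# --- Added sketch (not part of the original file): a minimal, self-contained shape
# walk-through of the prefix-decoder idea implemented above. The names here are
# illustrative assumptions; the real class wires the same pieces through GPT-2.
import torch
from torch import nn

prefix_length, clip_dim, n_embd = 10, 768, 768
decode_prefix = nn.Linear(clip_dim, n_embd)  # stands in for the decode_prefix layer

clip_feature = torch.randn(1, prefix_length, clip_dim)   # encoded prefix features
prefix_embeds = decode_prefix(clip_feature)              # (1, prefix_length, n_embd)
token_embeds = torch.randn(1, 20, n_embd)                # embedded caption tokens
inputs_embeds = torch.cat((prefix_embeds, token_embeds), dim=1)
assert inputs_embeds.shape == (1, prefix_length + 20, n_embd)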
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Tuple = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), 
('''videomae''', '''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) _SCREAMING_SNAKE_CASE : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def UpperCAmelCase_ ( _A ): '''simple docstring''' for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: SCREAMING_SNAKE_CASE__ = model_type_to_module_name(_A ) SCREAMING_SNAKE_CASE__ = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' ) try: return getattr(_A , _A ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(_A , '''__name__''' , _A ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. SCREAMING_SNAKE_CASE__ = importlib.import_module('''transformers''' ) if hasattr(_A , _A ): return getattr(_A , _A ) return None def UpperCAmelCase_ ( _A , _A = None , _A = False , _A = False , _A = None , _A = None , _A = None , _A = False , **_A , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = get_file_from_repo( _A , _A , cache_dir=_A , force_download=_A , resume_download=_A , proxies=_A , use_auth_token=_A , revision=_A , local_files_only=_A , ) if resolved_config_file is None: logger.info( '''Could not locate the image processor configuration file, will try to use the model config instead.''' ) return {} with open(_A , encoding='''utf-8''' ) as reader: return json.load(_A ) class UpperCAmelCase__ : """simple docstring""" def __init__( self : List[Any] ) -> int: raise EnvironmentError( '''AutoImageProcessor is designed to be instantiated ''' '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(__lowerCamelCase ) def lowercase_ ( cls : Optional[int] , __lowerCamelCase : Any , **__lowerCamelCase : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ = kwargs.pop('''config''' , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ = kwargs.pop('''trust_remote_code''' , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = ImageProcessingMixin.get_image_processor_dict(__lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = config_dict.get('''image_processor_type''' , __lowerCamelCase ) SCREAMING_SNAKE_CASE__ = None if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ): SCREAMING_SNAKE_CASE__ = config_dict['''auto_map''']['''AutoImageProcessor'''] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: SCREAMING_SNAKE_CASE__ = config_dict.pop('''feature_extractor_type''' , __lowerCamelCase ) if feature_extractor_class is not None: logger.warning( '''Could not find image processor class in the image processor config or the model config. 
Loading''' ''' based on pattern matching with the model\'s feature extractor configuration.''' ) SCREAMING_SNAKE_CASE__ = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' ) if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): SCREAMING_SNAKE_CASE__ = config_dict['''auto_map''']['''AutoFeatureExtractor'''] SCREAMING_SNAKE_CASE__ = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' ) logger.warning( '''Could not find image processor auto map in the image processor config or the model config.''' ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(__lowerCamelCase , __lowerCamelCase ): SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) # It could be in `config.image_processor_type`` SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , '''image_processor_type''' , __lowerCamelCase ) if hasattr(__lowerCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map: SCREAMING_SNAKE_CASE__ = config.auto_map['''AutoImageProcessor'''] if image_processor_class is not None: SCREAMING_SNAKE_CASE__ = image_processor_class_from_name(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = image_processor_auto_map is not None SCREAMING_SNAKE_CASE__ = image_processor_class is not None or type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING SCREAMING_SNAKE_CASE__ = resolve_trust_remote_code( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if has_remote_code and trust_remote_code: SCREAMING_SNAKE_CASE__ = get_class_from_dynamic_module( __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = kwargs.pop('''code_revision''' , __lowerCamelCase ) if os.path.isdir(__lowerCamelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING: SCREAMING_SNAKE_CASE__ = IMAGE_PROCESSOR_MAPPING[type(__lowerCamelCase )] return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase ) raise ValueError( f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowercase_ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ) -> str: IMAGE_PROCESSOR_MAPPING.register(__lowerCamelCase , __lowerCamelCase )
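# --- Added usage sketch (assumption: the mapping above is what backs the public
# transformers.AutoImageProcessor; the checkpoint name is only an example):
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
image = Image.new("RGB", (224, 224))
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) for ViT defaults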
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
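# --- Added context sketch: a server loop consistent with the assertions above.
# This is an assumption for illustration, not the actual file_transfer.send_file.
import socket

def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", 12312))
    sock.listen(5)
    conn, _ = sock.accept()
    conn.recv(1024)  # wait for the client's request
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:  # the mocked read() yields 1 then None, so send() fires once
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()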
'''simple docstring''' import argparse from collections import defaultdict import yaml SCREAMING_SNAKE_CASE__ = 'docs/source/en/_toctree.yml' def lowercase__ ( __UpperCamelCase )-> Optional[Any]: UpperCamelCase = defaultdict(__UpperCamelCase ) UpperCamelCase = [] UpperCamelCase = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(__UpperCamelCase ) UpperCamelCase = new_doc_list UpperCamelCase = [key for key, value in counts.items() if value > 1] UpperCamelCase = [] for duplicate_key in duplicates: UpperCamelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(__UpperCamelCase ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) UpperCamelCase = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__UpperCamelCase ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(__UpperCamelCase ) # Sort return overview_doc def lowercase__ ( __UpperCamelCase=False )-> List[str]: with open(__UpperCamelCase , encoding="""utf-8""" ) as f: UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCamelCase = content[api_idx]["""sections"""] # Then to the model doc UpperCamelCase = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 UpperCamelCase = api_doc[scheduler_idx]["""sections"""] UpperCamelCase = clean_doc_toc(__UpperCamelCase ) UpperCamelCase = False if new_scheduler_doc != scheduler_doc: UpperCamelCase = True if overwrite: UpperCamelCase = new_scheduler_doc if diff: if overwrite: UpperCamelCase = api_doc with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def lowercase__ ( __UpperCamelCase=False )-> Tuple: with open(__UpperCamelCase , encoding="""utf-8""" ) as f: UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCamelCase = content[api_idx]["""sections"""] # Then to the model doc UpperCamelCase = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 UpperCamelCase = False UpperCamelCase = api_doc[pipeline_idx]["""sections"""] UpperCamelCase = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: UpperCamelCase = pipeline_doc["""section"""] UpperCamelCase = clean_doc_toc(__UpperCamelCase ) if overwrite: UpperCamelCase = new_sub_pipeline_doc new_pipeline_docs.append(__UpperCamelCase ) # sort overall pipeline doc UpperCamelCase = clean_doc_toc(__UpperCamelCase ) if new_pipeline_docs != pipeline_docs: UpperCamelCase = True if overwrite: UpperCamelCase = new_pipeline_docs if diff: if overwrite: 
UpperCamelCase = api_doc with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action="https://huggingface.co/datasets/infinityofspace/python_codestyles-mixed1-500/viewer/default/store_true", help='Whether to fix inconsistencies.') SCREAMING_SNAKE_CASE__ = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
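# --- Added illustration of the sorting rule implemented by clean_doc_toc (the
# de-obfuscated name the script's own internal calls use); entries are made up:
example_docs = [
    {"local": "zebra", "title": "Zebra"},
    {"local": "overview", "title": "Overview"},
    {"local": "alpha", "title": "alpha"},
]
print(clean_doc_toc(example_docs))
# -> [{'local': 'overview', 'title': 'Overview'},
#     {'local': 'alpha', 'title': 'alpha'},
#     {'local': 'zebra', 'title': 'Zebra'}]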
'''simple docstring''' from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge UpperCamelCase_ = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] UpperCamelCase_ = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . Organization claims that governments around the world are using the threat of terrorism to advance" " executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,bootstrap_aggregation=__UpperCamelCase ,rouge_keys=['rouge2', 'rougeL'] ) assert isinstance(__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,bootstrap_aggregation=__UpperCamelCase ,rouge_keys=['rouge2'] ) assert ( pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean() ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = 'rougeLsum' SCREAMING_SNAKE_CASE : Dict = calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,newline_sep=__UpperCamelCase ,rouge_keys=[k] )[k] SCREAMING_SNAKE_CASE : Tuple = calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,newline_sep=__UpperCamelCase ,rouge_keys=[k] )[k] assert score > score_no_sep def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = ['rouge1', 'rouge2', 'rougeL'] SCREAMING_SNAKE_CASE : Optional[int] = calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,newline_sep=__UpperCamelCase ,rouge_keys=__UpperCamelCase ) SCREAMING_SNAKE_CASE : Tuple = calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,newline_sep=__UpperCamelCase ,rouge_keys=__UpperCamelCase ) assert score_sep == score_no_sep def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = [ 'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.', 'Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .', ] SCREAMING_SNAKE_CASE : Dict = [ 'Margot Frank, died in 1945, a month earlier than previously thought.', 'Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of' ' the final seconds on board Flight 9525.', ] assert calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,newline_sep=__UpperCamelCase ) == calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,newline_sep=__UpperCamelCase ) def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [ '\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" ' ] SCREAMING_SNAKE_CASE : Any = [ ' Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .' 
] SCREAMING_SNAKE_CASE : List[str] = calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,rouge_keys=['rougeLsum'] ,newline_sep=__UpperCamelCase )['rougeLsum'] SCREAMING_SNAKE_CASE : List[Any] = calculate_rouge(__UpperCamelCase ,__UpperCamelCase ,rouge_keys=['rougeLsum'] )['rougeLsum'] assert new_score > prev_score def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = Path('examples/seq2seq/test_data/wmt_en_ro' ) SCREAMING_SNAKE_CASE : int = calculate_rouge_path(data_dir.joinpath('test.source' ) ,data_dir.joinpath('test.target' ) ) assert isinstance(__UpperCamelCase ,__UpperCamelCase ) SCREAMING_SNAKE_CASE : str = calculate_rouge_path( data_dir.joinpath('test.source' ) ,data_dir.joinpath('test.target' ) ,bootstrap_aggregation=__UpperCamelCase ) assert isinstance(__UpperCamelCase ,__UpperCamelCase )
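# --- Added standalone sketch of the metric these tests exercise, using the
# rouge_score package directly (an assumption: the repo's calculate_rouge wraps it):
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge2", "rougeL"], use_stemmer=True)
scores = scorer.score(
    "The cat sat on the mat.",        # reference
    "A cat was sitting on the mat.",  # prediction
)
print(scores["rougeL"].fmeasure)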
354
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
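# --- Added usage sketch of the config class above; the attribute_map aliases let
# generic code read hidden_size/num_hidden_layers regardless of the GPT naming:
config = OpenAIGPTConfig(n_embd=768, n_layer=12, n_head=12)
assert config.hidden_size == 768 and config.num_hidden_layers == 12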
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
"""Koch snowflake: repeatedly replace every line segment of a triangle with four
smaller segments to approximate the fractal."""

from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
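# --- Added sanity check: each iteration replaces every segment with four, so the
# point count grows as 3 * 4**n + 1 (4 points = 3 segments at n = 0):
for n in range(4):
    assert len(iterate(INITIAL_VECTORS, n)) == 3 * 4**n + 1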
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case : List[str] = logging.get_logger(__name__) __snake_case : Tuple = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class __SCREAMING_SNAKE_CASE ( __lowercase): _SCREAMING_SNAKE_CASE : Optional[int] = '''instructblip_vision_model''' def __init__( self , _UpperCamelCase=14_08 , _UpperCamelCase=61_44 , _UpperCamelCase=39 , _UpperCamelCase=16 , _UpperCamelCase=2_24 , _UpperCamelCase=14 , _UpperCamelCase="gelu" , _UpperCamelCase=1E-6 , _UpperCamelCase=0.0 , _UpperCamelCase=1E-10 , _UpperCamelCase=True , **_UpperCamelCase , ): """simple docstring""" super().__init__(**_UpperCamelCase ) lowerCAmelCase__ = hidden_size lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = patch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = attention_dropout lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = hidden_act lowerCAmelCase__ = qkv_bias @classmethod def UpperCamelCase__ ( cls , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" cls._set_token_in_kwargs(_UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(_UpperCamelCase , **_UpperCamelCase ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": lowerCAmelCase__ = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(_UpperCamelCase , **_UpperCamelCase ) class __SCREAMING_SNAKE_CASE ( __lowercase): _SCREAMING_SNAKE_CASE : str = '''instructblip_qformer''' def __init__( self , _UpperCamelCase=3_05_22 , _UpperCamelCase=7_68 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase=30_72 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_12 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=0 , _UpperCamelCase="absolute" , _UpperCamelCase=2 , _UpperCamelCase=14_08 , **_UpperCamelCase , ): """simple docstring""" super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase ) lowerCAmelCase__ = vocab_size lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = hidden_act lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = initializer_range lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = position_embedding_type lowerCAmelCase__ = cross_attention_frequency lowerCAmelCase__ = encoder_hidden_size @classmethod def UpperCamelCase__ ( cls , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" cls._set_token_in_kwargs(_UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(_UpperCamelCase , **_UpperCamelCase ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": lowerCAmelCase__ = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(_UpperCamelCase , **_UpperCamelCase ) class __SCREAMING_SNAKE_CASE ( __lowercase): _SCREAMING_SNAKE_CASE : List[str] = '''instructblip''' _SCREAMING_SNAKE_CASE : List[str] = True def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=32 , **_UpperCamelCase ): """simple docstring""" super().__init__(**_UpperCamelCase ) if vision_config is None: lowerCAmelCase__ = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: lowerCAmelCase__ = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: lowerCAmelCase__ = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) lowerCAmelCase__ = InstructBlipVisionConfig(**_UpperCamelCase ) lowerCAmelCase__ = InstructBlipQFormerConfig(**_UpperCamelCase ) lowerCAmelCase__ = text_config['model_type'] if 'model_type' in text_config else 'opt' lowerCAmelCase__ = CONFIG_MAPPING[text_model_type](**_UpperCamelCase ) lowerCAmelCase__ = self.text_config.tie_word_embeddings lowerCAmelCase__ = self.text_config.is_encoder_decoder lowerCAmelCase__ = num_query_tokens lowerCAmelCase__ = self.vision_config.hidden_size lowerCAmelCase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowerCAmelCase__ = 1.0 lowerCAmelCase__ = 0.02 @classmethod def UpperCamelCase__ ( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ): """simple docstring""" return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCamelCase , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ = self.vision_config.to_dict() lowerCAmelCase__ = self.qformer_config.to_dict() lowerCAmelCase__ = self.text_config.to_dict() lowerCAmelCase__ = self.__class__.model_type return output
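# --- Added usage sketch (class names follow the definitions above; all values are
# the library defaults the constructors fall back to):
config = InstructBlipConfig()  # sub-configs fall back to their defaults
print(config.num_query_tokens)           # 32
print(config.vision_config.hidden_size)  # 1408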
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit on the Aer simulator and return the outcome histogram."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
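# --- Added follow-up check: with no gates before the measurement, the outcome is
# deterministic, so all 1000 shots land in state '0' (assumes a qiskit version
# that still ships qiskit.Aer and qiskit.execute, as the function above does):
counts = single_qubit_measure(1, 1)
assert counts == {"0": 1000}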
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]: '''simple docstring''' A__ = old_name if "patch_embed" in old_name: A__ , A__ , A__ = old_name.split('.' ) if layer == "0": A__ = old_name.replace('0' , 'convolution1' ) elif layer == "1": A__ = old_name.replace('1' , 'batchnorm_before' ) elif layer == "3": A__ = old_name.replace('3' , 'convolution2' ) else: A__ = old_name.replace('4' , 'batchnorm_after' ) if "network" in old_name and re.search(R'\d\.\d' , UpperCAmelCase__ ): A__ = R'\b\d{2}\b' if bool(re.search(UpperCAmelCase__ , UpperCAmelCase__ ) ): A__ = re.search(R'\d\.\d\d.' , UpperCAmelCase__ ).group() else: A__ = re.search(R'\d\.\d.' , UpperCAmelCase__ ).group() if int(match[0] ) < 6: A__ = old_name.replace(UpperCAmelCase__ , '' ) A__ = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] ) A__ = 'intermediate_stages.' + trimmed_name else: A__ = old_name.replace(UpperCAmelCase__ , '' ) if int(match[2] ) < num_meta4D_last_stage: A__ = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] ) else: A__ = str(int(match[2] ) - num_meta4D_last_stage ) A__ = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index ) if "norm1" in old_name: A__ = trimmed_name.replace('norm1' , 'layernorm1' ) elif "norm2" in old_name: A__ = trimmed_name.replace('norm2' , 'layernorm2' ) elif "fc1" in old_name: A__ = trimmed_name.replace('fc1' , 'linear_in' ) elif "fc2" in old_name: A__ = trimmed_name.replace('fc2' , 'linear_out' ) A__ = 'last_stage.' + trimmed_name elif "network" in old_name and re.search(R'.\d.' , UpperCAmelCase__ ): A__ = old_name.replace('network' , 'intermediate_stages' ) if "fc" in new_name: A__ = new_name.replace('fc' , 'convolution' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): A__ = new_name.replace('norm1' , 'batchnorm_before' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): A__ = new_name.replace('norm2' , 'batchnorm_after' ) if "proj" in new_name: A__ = new_name.replace('proj' , 'projection' ) if "dist_head" in new_name: A__ = new_name.replace('dist_head' , 'distillation_classifier' ) elif "head" in new_name: A__ = new_name.replace('head' , 'classifier' ) elif "patch_embed" in new_name: A__ = 'efficientformer.' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": A__ = new_name.replace('norm' , 'layernorm' ) A__ = 'efficientformer.' + new_name else: A__ = 'efficientformer.encoder.' 
+ new_name return new_name def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> int: '''simple docstring''' for key in checkpoint.copy().keys(): A__ = checkpoint.pop(UpperCAmelCase__ ) A__ = val return checkpoint def _snake_case( ) -> int: '''simple docstring''' A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' A__ = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ) return image def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A__ = torch.load(UpperCAmelCase__ , map_location='cpu' )['model'] A__ = EfficientFormerConfig.from_json_file(UpperCAmelCase__ ) A__ = EfficientFormerForImageClassificationWithTeacher(UpperCAmelCase__ ) A__ = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] ) A__ = config.depths[-1] - config.num_metaad_blocks + 1 A__ = convert_torch_checkpoint(UpperCAmelCase__ , UpperCAmelCase__ ) model.load_state_dict(UpperCAmelCase__ ) model.eval() A__ = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } # prepare image A__ = prepare_img() A__ = 256 A__ = 224 A__ = EfficientFormerImageProcessor( size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , ) A__ = processor(images=UpperCAmelCase__ , return_tensors='pt' ).pixel_values # original processing pipeline A__ = Compose( [ Resize(UpperCAmelCase__ , interpolation=pillow_resamplings['bicubic'] ), CenterCrop(UpperCAmelCase__ ), ToTensor(), Normalize(UpperCAmelCase__ , UpperCAmelCase__ ), ] ) A__ = image_transforms(UpperCAmelCase__ ).unsqueeze(0 ) assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = model(UpperCAmelCase__ ) A__ = outputs.logits A__ = (1, 1000) if "l1" in model_name: A__ = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :10] , UpperCAmelCase__ , atol=1E-3 ) assert logits.shape == expected_shape elif "l3" in model_name: A__ = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :10] , UpperCAmelCase__ , atol=1E-3 ) assert logits.shape == expected_shape elif "l7" in model_name: A__ = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( f'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' ) # Save Checkpoints Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) print(f'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' ) processor.save_pretrained(UpperCAmelCase__ ) print(f'Processor successfuly saved at {pytorch_dump_path}' ) if push_to_hub: print('Pushing model to the hub...' 
) model.push_to_hub( repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='Add model' , use_temp_dir=UpperCAmelCase__ , ) processor.push_to_hub( repo_id=f'Bearnardd/{pytorch_dump_path}' , commit_message='Add image processor' , use_temp_dir=UpperCAmelCase__ , ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to EfficientFormer pytorch checkpoint.", ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for EfficientFormer model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument("--push_to_hub", action="https://huggingface.co/datasets/infinityofspace/python_codestyles-mixed1-500/viewer/default/store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="https://huggingface.co/datasets/infinityofspace/python_codestyles-mixed1-500/viewer/default/store_false", help="Do not push model and image processor to the hub", ) parser.set_defaults(push_to_hub=True) lowercase_ = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
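# --- Added example invocation of the conversion script above; the script filename
# and paths are placeholder assumptions, the flags are the ones the argparser defines:
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path ./efficientformer_l1.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1-converted \
#       --no-push_to_hub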
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class A__ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowercase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: '''simple docstring''' A_ = TextaTextGenerationPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) return generator, ["Something to write", "Something else"] def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any: '''simple docstring''' A_ = generator("""Something there""" ) self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) A_ = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__ , [ [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}], [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}], ] , ) A_ = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__ , [ [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}], [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}], ] , ) with self.assertRaises(UpperCamelCase__ ): generator(4 ) @require_torch def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility A_ = generator("""Something there""" , do_sample=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] ) A_ = 3 A_ = generator( """Something there""" , num_return_sequences=UpperCamelCase__ , num_beams=UpperCamelCase__ , ) A_ = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) A_ = generator("""This is a test""" , do_sample=UpperCamelCase__ , num_return_sequences=2 , return_tensors=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__ , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) A_ = generator.model.config.eos_token_id A_ = """<pad>""" A_ = generator( ["""This is a test""", """This is a second test"""] , do_sample=UpperCamelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase__ , ) self.assertEqual( UpperCamelCase__ , [ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def snake_case_ ( self ) -> Any: '''simple docstring''' A_ 
= pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility A_ = generator("""Something there""" , do_sample=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def UpperCamelCase ( _A : np.ndarray )-> np.ndarray: """simple docstring""" return input_array.reshape((input_array.size, 1) ) def UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int )-> np.ndarray: """simple docstring""" A__ = np.nan for i in range(_A ): A__ = features[:, labels == i] A__ = data.mean(1 ) # Centralize the data of class i A__ = data - column_reshape(_A ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_A , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) A__ = np.dot(_A , centered_data.T ) return covariance_sum / features.shape[1] def UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int )-> np.ndarray: """simple docstring""" A__ = features.mean(1 ) A__ = np.nan for i in range(_A ): A__ = features[:, labels == i] A__ = data.shape[1] A__ = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) A__ = device_data * np.dot( column_reshape(_A ) - column_reshape(_A ) , (column_reshape(_A ) - column_reshape(_A )).T , ) return covariance_sum / features.shape[1] def UpperCamelCase ( _A : np.ndarray , _A : int )-> np.ndarray: """simple docstring""" if features.any(): A__ = features.mean(1 ) # Center the dataset A__ = features - np.reshape(_A , (data_mean.size, 1) ) A__ = np.dot(_A , centered_data.T ) / features.shape[1] A__ , A__ = np.linalg.eigh(_A ) # Take all the columns in the reverse order (-1), and then takes only the first A__ = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space A__ = np.dot(filtered_eigenvectors.T , _A ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_A ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( _A : np.ndarray , _A : np.ndarray , _A : int , _A : int )-> np.ndarray: """simple docstring""" assert classes > dimensions # Check if features have been already loaded if features.any: A__ , A__ = eigh( covariance_between_classes(_A , _A , _A ) , covariance_within_classes(_A , _A , _A ) , ) A__ = eigenvectors[:, ::-1][:, :dimensions] A__ , A__ , A__ = np.linalg.svd(_A ) A__ = svd_matrix[:, 0:dimensions] A__ = np.dot(filtered_svd_matrix.T , _A ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_A ) logging.error("Dataset empty" ) raise AssertionError def UpperCamelCase ( )-> None: """simple docstring""" A__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) A__ = np.array([0, 0, 0, 1, 1] ) A__ = 2 A__ = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_A ) as error_info: A__ = linear_discriminant_analysis( _A , _A , _A , _A ) if isinstance(_A , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def UpperCamelCase ( )-> None: """simple docstring""" A__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) A__ = 2 A__ = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(_A ) as error_info: A__ = principal_component_analysis(_A , _A ) if not 
np.allclose(_A , _A ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
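# --- Added usage sketch of the PCA projection above, using the de-obfuscated name
# that the file's own tests call; features are column-wise samples
# (3 features x 5 samples):
features = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
                     [2.0, 3.0, 4.0, 5.0, 6.0],
                     [3.0, 4.0, 5.0, 6.0, 7.0]])
projected = principal_component_analysis(features, 2)
print(projected.shape)  # (2, 5): five samples projected onto two components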
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
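# --- Added usage sketch of FileLock outside the tests: serialize writers of a
# shared file across processes (paths and helper name are illustrative):
def append_entry(tmpdir):
    lock = FileLock(str(tmpdir / "shared.json.lock"))
    with lock.acquire(timeout=5):  # raises Timeout if another process holds it
        with open(str(tmpdir / "shared.json"), "a") as f:
            f.write("{}\n")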
"""simple docstring""" class UpperCamelCase_ : """simple docstring""" def __init__( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) -> str: __SCREAMING_SNAKE_CASE = name __SCREAMING_SNAKE_CASE = value __SCREAMING_SNAKE_CASE = weight def __repr__( self : Tuple ) -> Union[str, Any]: return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})""" def UpperCAmelCase_ ( self : Dict ) -> Any: return self.value def UpperCAmelCase_ ( self : List[Any] ) -> str: return self.name def UpperCAmelCase_ ( self : Tuple ) -> int: return self.weight def UpperCAmelCase_ ( self : int ) -> int: return self.value / self.weight def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] for i in range(len(lowerCAmelCase_ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = sorted(lowerCAmelCase_ , key=lowerCAmelCase_ , reverse=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0, 0.0 for i in range(len(lowerCAmelCase_ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def UpperCAmelCase__ (): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return x + 2 class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : Any ) -> Any: __SCREAMING_SNAKE_CASE = "x = 3" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {} , state=UpperCAmelCase__ ) assert result == 3 self.assertDictEqual(UpperCAmelCase__ , {"x": 3} ) __SCREAMING_SNAKE_CASE = "x = y" __SCREAMING_SNAKE_CASE = {"y": 5} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {} , state=UpperCAmelCase__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(UpperCAmelCase__ , {"x": 5, "y": 5} ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: __SCREAMING_SNAKE_CASE = "y = add_two(x)" __SCREAMING_SNAKE_CASE = {"x": 3} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {"add_two": add_two} , state=UpperCAmelCase__ ) assert result == 5 self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "y": 5} ) # Won't work without the tool with CaptureStdout() as out: __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {} , state=UpperCAmelCase__ ) assert result is None assert "tried to execute add_two" in out.out def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = "x = 3" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {} , state=UpperCAmelCase__ ) assert result == 3 self.assertDictEqual(UpperCAmelCase__ , {"x": 3} ) def UpperCAmelCase_ ( self : str ) -> Any: __SCREAMING_SNAKE_CASE = "test_dict = {'x': x, 'y': add_two(x)}" __SCREAMING_SNAKE_CASE = {"x": 3} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {"add_two": add_two} , state=UpperCAmelCase__ ) self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "y": 5} ) self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = "x = 3\ny = 5" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {} , state=UpperCAmelCase__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "y": 5} ) def UpperCAmelCase_ ( self : Any ) -> Any: __SCREAMING_SNAKE_CASE = "text = f'This is x: {x}.'" __SCREAMING_SNAKE_CASE = {"x": 3} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {} , state=UpperCAmelCase__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "text": "This is x: 3."} ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = "if x <= 3:\n y = 2\nelse:\n y = 5" __SCREAMING_SNAKE_CASE = {"x": 3} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {} , state=UpperCAmelCase__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "y": 2} ) __SCREAMING_SNAKE_CASE = {"x": 8} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {} , state=UpperCAmelCase__ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(UpperCAmelCase__ , {"x": 8, "y": 5} ) def UpperCAmelCase_ ( self : Tuple ) -> str: __SCREAMING_SNAKE_CASE = "test_list = [x, add_two(x)]" __SCREAMING_SNAKE_CASE = {"x": 3} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {"add_two": add_two} , state=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , [3, 5] ) self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "test_list": [3, 5]} ) def UpperCAmelCase_ ( self : Any ) -> int: __SCREAMING_SNAKE_CASE = "y = x" __SCREAMING_SNAKE_CASE = {"x": 3} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {} , state=UpperCAmelCase__ ) assert result == 3 self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "y": 3} ) def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = "test_list = [x, add_two(x)]\ntest_list[1]" __SCREAMING_SNAKE_CASE = {"x": 3} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {"add_two": add_two} , state=UpperCAmelCase__ ) assert result == 5 self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "test_list": [3, 5]} ) __SCREAMING_SNAKE_CASE = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" __SCREAMING_SNAKE_CASE = {"x": 3} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {"add_two": add_two} , state=UpperCAmelCase__ ) assert result == 5 self.assertDictEqual(UpperCAmelCase__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = "x = 0\nfor i in range(3):\n x = i" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(UpperCAmelCase__ , {"range": range} , state=UpperCAmelCase__ ) assert result == 2 self.assertDictEqual(UpperCAmelCase__ , {"x": 2, "i": 2} )
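# Minimal usage sketch for the restricted interpreter exercised above, using the
# same `evaluate(code, tools, state=...)` signature the tests rely on:
state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
print(result)  # 5
print(state)   # {'x': 3, 'y': 5}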
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    # Convert bytes to megabytes.
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer (a dummy one when the DeepSpeed config already defines it)
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
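# Example invocation (hypothetical script name; the flags are the ones defined
# in main() above):
#   accelerate launch peak_memory_usage.py --model_name_or_path bert-base-cased \
#       --num_epochs 1 --n_train 320 --n_val 160 --output_dir ./results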
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
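# Usage sketch for the tokenizer above (assumes a local ESM vocab file; the
# tokenizer splits on whitespace, so protein sequences are given one residue
# per token):
#   tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#   enc = tokenizer("M K T A Y I A K")
#   enc["input_ids"]  # <cls> + per-residue ids + <eos>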
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
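# Worked probe for the interpolation step above (values chosen for illustration):
# collection = [10, 30, 40, 45, 50, 66, 77, 93], item = 66, left = 0, right = 7
# point = 0 + (66 - 10) * (7 - 0) // (93 - 10) = 56 * 7 // 83 = 4
# collection[4] = 50 < 66, so the search continues in [point + 1, right].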
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : Any = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase_ ( _snake_case ): """simple docstring""" _lowerCAmelCase : Any = """megatron-bert""" def __init__( self , lowerCAmelCase=2_90_56 , lowerCAmelCase=10_24 , lowerCAmelCase=24 , lowerCAmelCase=16 , lowerCAmelCase=40_96 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0 , lowerCAmelCase="absolute" , lowerCAmelCase=True , **lowerCAmelCase , ): """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : """simple docstring""" def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): """simple docstring""" snake_case = parent snake_case = batch_size snake_case = seq_length snake_case = is_training snake_case = use_input_mask snake_case = use_token_type_ids snake_case = use_labels snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = type_sequence_label_size snake_case = initializer_range snake_case = num_labels snake_case = num_choices snake_case = scope def snake_case ( self ): """simple docstring""" snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case = None if self.use_input_mask: snake_case = random_attention_mask([self.batch_size, self.seq_length] ) snake_case = None if self.use_token_type_ids: snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case = None snake_case = None snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case = ids_tensor([self.batch_size] , self.num_choices ) snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self ): """simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" snake_case = BioGptModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , 
attention_mask=lowerCAmelCase ) snake_case = model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): """simple docstring""" snake_case = BioGptForCausalLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): """simple docstring""" snake_case = BioGptModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() # create attention mask snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase ) snake_case = self.seq_length // 2 snake_case = 0 # first forward pass snake_case ,snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase ).to_tuple() # create hypothetical next token and extent to next_input_ids snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids snake_case = ids_tensor((1,) , lowerCAmelCase ).item() + 1 snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) snake_case = random_other_next_tokens # append to next input_ids and attn_mask snake_case = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase )] , dim=1 , ) # get two different outputs snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state'] snake_case = model(lowerCAmelCase , past_key_values=lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state'] # select random slice snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case = output_from_no_past[:, -1, random_slice_idx].detach() snake_case = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): """simple docstring""" snake_case = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval() snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase ) # first forward pass snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase ) snake_case ,snake_case = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and snake_case = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state'] snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[ 'last_hidden_state' ] # select random slice snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case = output_from_no_past[:, -3:, 
random_slice_idx].detach() snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , lowerCAmelCase=False ): """simple docstring""" snake_case = BioGptForCausalLM(lowerCAmelCase ) model.to(lowerCAmelCase ) if gradient_checkpointing: model.gradient_checkpointing_enable() snake_case = model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def snake_case ( self , lowerCAmelCase , *lowerCAmelCase ): """simple docstring""" snake_case = BioGptModel(lowerCAmelCase ) snake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): """simple docstring""" snake_case = self.num_labels snake_case = BioGptForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self ): """simple docstring""" snake_case = self.prepare_config_and_inputs() ( ( snake_case ) ,( snake_case ) ,( snake_case ) ,( snake_case ) ,( snake_case ) ,( snake_case ) ,( snake_case ) , ) = config_and_inputs snake_case = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase : List[Any] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) _lowerCAmelCase : str = (BioGptForCausalLM,) if is_torch_available() else () _lowerCAmelCase : str = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) _lowerCAmelCase : List[str] = False def snake_case ( self ): """simple docstring""" snake_case = BioGptModelTester(self ) snake_case = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case = type self.model_tester.create_and_check_model(*lowerCAmelCase ) def snake_case ( self ): """simple 
docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase , gradient_checkpointing=lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase ) @slow def snake_case ( self ): """simple docstring""" snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(lowerCAmelCase ) snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) snake_case = 'left' # Define PAD Token = EOS Token = 50256 snake_case = tokenizer.eos_token snake_case = model.config.eos_token_id # use different length sentences to test batching snake_case = [ 'Hello, my dog is a little', 'Today, I', ] snake_case = tokenizer(lowerCAmelCase , return_tensors='pt' , padding=lowerCAmelCase ) snake_case = inputs['input_ids'].to(lowerCAmelCase ) snake_case = model.generate( input_ids=lowerCAmelCase , attention_mask=inputs['attention_mask'].to(lowerCAmelCase ) , ) snake_case = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(lowerCAmelCase ) snake_case = model.generate(input_ids=lowerCAmelCase ) snake_case = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() snake_case = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(lowerCAmelCase ) snake_case = model.generate(input_ids=lowerCAmelCase , max_length=model.config.max_length - num_paddings ) snake_case = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase ) snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase ) snake_case = [ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , [non_padded_sentence, padded_sentence] ) @slow def snake_case ( self ): """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = BioGptModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = 3 snake_case = input_dict['input_ids'] snake_case = input_ids.ne(1 ).to(lowerCAmelCase ) snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) snake_case = BioGptForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case ( self ): """simple docstring""" snake_case ,snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() snake_case = 3 snake_case = 'multi_label_classification' snake_case = input_dict['input_ids'] snake_case = input_ids.ne(1 ).to(lowerCAmelCase ) snake_case = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) snake_case = BioGptForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) snake_case = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) snake_case = model(lowerCAmelCase )[0] snake_case = 4_23_84 snake_case = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , lowerCAmelCase ) snake_case = torch.tensor( [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase , atol=1E-4 ) ) @slow def snake_case ( self ): """simple docstring""" snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(lowerCAmelCase ) torch.manual_seed(0 ) snake_case = tokenizer('COVID-19 is' , return_tensors='pt' ).to(lowerCAmelCase ) snake_case = model.generate( **lowerCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCAmelCase , ) snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase ) snake_case = ( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(lowerCAmelCase , lowerCAmelCase )
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : str ,A_ : Dict=13 ,A_ : str=7 ,A_ : str=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=False ,A_ : str=False ,A_ : Tuple=False ,A_ : str=2 ,A_ : Optional[int]=99 ,A_ : Union[str, Any]=0 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Union[str, Any]=2 ,A_ : Any=0.02 ,A_ : List[str]=2 ,A_ : int=4 ,A_ : int="last" ,A_ : Dict=True ,A_ : Union[str, Any]=None ,A_ : Any=0 ,) -> List[Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_lengths A = use_token_type_ids A = use_labels A = gelu_activation A = sinusoidal_embeddings A = causal A = asm A = n_langs A = vocab_size A = n_special A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = summary_type A = use_proj A = scope A = bos_token_id def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_input_lengths: A = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,2 ).float() A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: return XLMConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : int ,A_ : Dict ,A_ : str ,A_ : Optional[Any] ,A_ : List[str] ,A_ : 
Union[str, Any] ,A_ : int ,A_ : str ,) -> Any: A = XLMModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,lengths=A_ ,langs=A_ ) A = model(A_ ,langs=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Any ,A_ : str ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : str ,A_ : Any ,A_ : str ,A_ : Dict ,) -> Dict: A = XLMWithLMHeadModel(A_ ) model.to(A_ ) model.eval() A = model(A_ ,token_type_ids=A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Any ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,) -> int: A = XLMForQuestionAnsweringSimple(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,start_positions=A_ ,end_positions=A_ ) A = outputs self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Optional[int] ,A_ : Any ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Tuple ,A_ : List[str] ,A_ : Optional[int] ,) -> List[Any]: A = XLMForQuestionAnswering(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,) A = model( A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,) ((A) , ) = result_with_labels.to_tuple() A = model(A_ ,start_positions=A_ ,end_positions=A_ ) ((A) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape ,() ) self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ,A_ : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ,) -> Optional[int]: A = XLMForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : Optional[int] ,) -> List[str]: A = self.num_labels A = XLMForTokenClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Union[str, Any] ,A_ : Dict ,A_ : List[Any] ,) -> List[str]: A = 
self.num_choices A = XLMForMultipleChoice(config=A_ ) model.to(A_ ) model.eval() A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A = model( A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase: str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase: Optional[int] = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ) -> Any: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[int] ,A_ : List[Any]=False ) -> int: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) A = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=A_ ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = XLMModelTester(self ) A = ConfigTester(self ,config_class=A_ ,emb_dim=37 ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Any ,A_ : str ,A_ : Tuple ,A_ : Any ,A_ : Any=False ,A_ : Any=1 ) -> List[Any]: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) ) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = min_length + idx + 1 A = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ,A_ : int ,A_ : Any ,A_ : str=False ,A_ : Any=1 ) -> Tuple: self.assertIsInstance(A_ ,A_ ) self.assertListEqual( [isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,) self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A_ ): # adds PAD dummy token A = min_length + idx + 1 A = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,) pass @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = XLMModel.from_pretrained(A_ ) 
self.assertIsNotNone(A_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(A_ ) A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president A = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A = model.generate(A_ ,do_sample=A_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
"""simple docstring""" def lowercase ( _snake_case : list , _snake_case : int = 0 ) ->list: """simple docstring""" __snake_case : Optional[int] = length or len(_snake_case ) __snake_case : List[str] = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: __snake_case , __snake_case : str = list_data[i + 1], list_data[i] __snake_case : Dict = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } SCREAMING_SNAKE_CASE : int = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def lowercase ( _snake_case : Optional[int] ) ->int: """simple docstring""" __snake_case : int = {} with open(_snake_case , '''r''' ) as file: for line_number, line in enumerate(_snake_case ): __snake_case : Union[str, Any] = line.strip() if line: __snake_case : str = line.split() __snake_case : Union[str, Any] = line_number __snake_case : Dict = words[0] __snake_case : str = value return result def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]: """simple docstring""" for attribute in key.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : Any = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : str = '''param''' if weight_type is not None and weight_type != "param": __snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape elif weight_type is not None and weight_type == "param": __snake_case : Optional[Any] = hf_pointer for attribute in hf_param_name.split('''.''' ): __snake_case : Dict = getattr(_snake_case , _snake_case ) __snake_case : List[str] = shape_pointer.shape # let's reduce dimension __snake_case : int = value[0] else: __snake_case : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case : List[Any] = value elif weight_type == "weight_g": __snake_case : Tuple = value elif weight_type == "weight_v": __snake_case : str = value elif weight_type == "bias": __snake_case : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __snake_case : List[Any] = getattr(_snake_case , _snake_case ) __snake_case : int = value else: __snake_case : List[Any] = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int: """simple docstring""" __snake_case : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(_snake_case ): __snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] __snake_case : List[str] = '''param''' if weight_type is not None and weight_type != "param": __snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case : Tuple = '''.'''.join([key, hf_param_name] ) else: __snake_case : Optional[int] = key __snake_case : List[Any] = value if '''lm_head''' in full_key else value[0] SCREAMING_SNAKE_CASE : Tuple = { """W_a""": """linear_1.weight""", """W_b""": """linear_2.weight""", """b_a""": """linear_1.bias""", """b_b""": """linear_2.bias""", """ln_W""": """norm.weight""", """ln_b""": """norm.bias""", } def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict: """simple docstring""" __snake_case : Tuple = False for key, mapped_key in MAPPING.items(): __snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case : int = True if "*" in mapped_key: __snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2] __snake_case : Tuple = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: __snake_case : Union[str, Any] = '''weight_g''' elif "weight_v" in name: __snake_case : List[str] = '''weight_v''' elif "bias" in name: __snake_case : Any = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case : List[Any] = '''weight''' else: __snake_case : Union[str, Any] = None if hf_dict is not None: rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) return is_used return is_used def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any: """simple docstring""" __snake_case : Union[str, Any] = [] __snake_case : Union[str, Any] = fairseq_model.state_dict() __snake_case : str = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case : str = False if "conv_layers" in name: load_conv_layer( _snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case : Union[str, Any] = True else: __snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case ) if not is_used: unused_weights.append(_snake_case ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowercase ( _snake_case : Any , 
_snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]: """simple docstring""" __snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] __snake_case : str = name.split('''.''' ) __snake_case : Optional[int] = int(items[0] ) __snake_case : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case : int = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case : Any = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_snake_case ) @torch.no_grad() def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict: """simple docstring""" if config_path is not None: __snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case ) else: __snake_case : Tuple = WavaVecaConfig() if is_seq_class: __snake_case : Optional[int] = read_txt_into_dict(_snake_case ) __snake_case : List[Any] = idalabel __snake_case : int = WavaVecaForSequenceClassification(_snake_case ) __snake_case : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) feature_extractor.save_pretrained(_snake_case ) elif is_finetuned: if dict_path: __snake_case : int = Dictionary.load(_snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case : Tuple = target_dict.pad_index __snake_case : int = target_dict.bos_index __snake_case : Tuple = target_dict.eos_index __snake_case : Optional[Any] = len(target_dict.symbols ) __snake_case : Any = os.path.join(_snake_case , '''vocab.json''' ) if not os.path.isdir(_snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) ) return os.makedirs(_snake_case , exist_ok=_snake_case ) __snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case : Dict = 0 __snake_case : 
List[Any] = 1 with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_snake_case , _snake_case ) __snake_case : List[Any] = WavaVecaCTCTokenizer( _snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , ) __snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False __snake_case : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , ) __snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case ) processor.save_pretrained(_snake_case ) __snake_case : Optional[int] = WavaVecaForCTC(_snake_case ) else: __snake_case : Tuple = WavaVecaForPreTraining(_snake_case ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' ) __snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case ) __snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case ) __snake_case : int = model[0].eval() recursively_load_weights(_snake_case , _snake_case , not is_finetuned ) hf_wavavec.save_pretrained(_snake_case ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) SCREAMING_SNAKE_CASE : Any = parser.parse_args() SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
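# A minimal sketch of loading the converted checkpoint afterwards. The folder
# name "./wav2vec2-converted" is a placeholder standing in for
# --pytorch_dump_folder_path; Wav2Vec2Processor / Wav2Vec2ForCTC are the
# public classes matching the fine-tuned (CTC) branch of the script above.
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./wav2vec2-converted")
model = Wav2Vec2ForCTC.from_pretrained("./wav2vec2-converted")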
'''simple docstring''' import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def lowerCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : int=() , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[Any]="no" , __lowerCamelCase : Optional[int]="29500" ) ->List[str]: _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ): _SCREAMING_SNAKE_CASE = True elif "IPython" in sys.modules: _SCREAMING_SNAKE_CASE = "google.colab" in str(sys.modules["""IPython"""].get_ipython() ) try: _SCREAMING_SNAKE_CASE = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( F'Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.' ) if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , __lowerCamelCase ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """ """your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if num_processes is None: _SCREAMING_SNAKE_CASE = 8 _SCREAMING_SNAKE_CASE = PrepareForLaunch(__lowerCamelCase , distributed_type="""TPU""" ) print(F'Launching a training on {num_processes} TPU cores.' ) xmp.spawn(__lowerCamelCase , args=__lowerCamelCase , nprocs=__lowerCamelCase , start_method="""fork""" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on one CPU.""" ) function(*__lowerCamelCase ) else: if num_processes is None: raise ValueError( """You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """ """inside your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if torch.cuda.is_initialized(): raise ValueError( """To launch a multi-GPU training from your notebook, you need to avoid running any instruction """ """using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """ """function.""" ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__lowerCamelCase , master_addr="""127.0.01""" , master_port=__lowerCamelCase , mixed_precision=__lowerCamelCase ): _SCREAMING_SNAKE_CASE = PrepareForLaunch(__lowerCamelCase , distributed_type="""MULTI_GPU""" ) print(F'Launching training on {num_processes} GPUs.' 
) try: start_processes(__lowerCamelCase , args=__lowerCamelCase , nprocs=__lowerCamelCase , start_method="""fork""" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( """CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """ """This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """ """Please review your imports and test them when running the `notebook_launcher()` to identify """ """which one is problematic.""" ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): _SCREAMING_SNAKE_CASE = "1" print("""Launching training on MPS.""" ) elif torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on CPU.""" ) function(*__lowerCamelCase ) def lowerCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=() , __lowerCamelCase : Any=2 ) ->Any: from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__lowerCamelCase , master_addr="""127.0.01""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ): _SCREAMING_SNAKE_CASE = PrepareForLaunch(__lowerCamelCase , debug=__lowerCamelCase ) start_processes(__lowerCamelCase , args=__lowerCamelCase , nprocs=__lowerCamelCase , start_method="""fork""" )
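# A minimal usage sketch for the launcher defined above (Accelerate exposes
# it as `notebook_launcher`). The training function is a placeholder; per the
# checks above, no `Accelerator` may be created and no CUDA call may run in
# the notebook before this call when spawning multiple GPU processes.
from accelerate import notebook_launcher

def training_function(learning_rate):
    print(f"training with lr={learning_rate}")

notebook_launcher(training_function, args=(3e-4,), num_processes=2, mixed_precision="no")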
def binary_insertion_sort(collection: list) -> list:
    """Sort ``collection`` in place with binary insertion sort and return it."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion point of ``val`` in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the larger elements one slot to the right, then insert ``val``.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
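# Example: the function sorts in place and also returns the list, so both
# results below hold (the sample data is arbitrary).
example = [5, 2, 4, 1, 3]
print(binary_insertion_sort(example))  # [1, 2, 3, 4, 5]
print(example)                         # [1, 2, 3, 4, 5] -- mutated in place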
'''simple docstring''' import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) UpperCamelCase__ : List[str] = { 'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in', 'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0', 'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out', 'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1', 'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm', 'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2', 'mask_downscaling.0': 'mask_embed.conv1', 'mask_downscaling.1': 'mask_embed.layer_norm1', 'mask_downscaling.3': 'mask_embed.conv2', 'mask_downscaling.4': 'mask_embed.layer_norm2', 'mask_downscaling.6': 'mask_embed.conv3', 'point_embeddings': 'point_embed', 'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding', 'image_encoder': 'vision_encoder', 'neck.0': 'neck.conv1', 'neck.1': 'neck.layer_norm1', 'neck.2': 'neck.conv2', 'neck.3': 'neck.layer_norm2', 'patch_embed.proj': 'patch_embed.projection', '.norm': '.layer_norm', 'blocks': 'layers', } def UpperCAmelCase ( a_ ) -> Dict: """simple docstring""" A_ : Optional[int] = {} state_dict.pop("""pixel_mean""" , a_ ) state_dict.pop("""pixel_std""" , a_ ) A_ : List[Any] = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*""" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A_ : Tuple = key.replace(a_ , a_ ) if re.match(a_ , a_ ): A_ : Dict = int(re.match(a_ , a_ ).group(2 ) ) if layer_nb == 0: A_ : Any = key.replace("""layers.0""" , """proj_in""" ) elif layer_nb == 1: A_ : List[str] = key.replace("""layers.1""" , """layers.0""" ) elif layer_nb == 2: A_ : Tuple = key.replace("""layers.2""" , """proj_out""" ) A_ : List[Any] = value A_ : Tuple = model_state_dict[ """prompt_encoder.shared_embedding.positional_embedding""" ] return model_state_dict def UpperCAmelCase ( a_ , a_ , a_ , a_="ybelkada/segment-anything" ) -> Tuple: """simple docstring""" A_ : Optional[int] = hf_hub_download(a_ , F"checkpoints/{model_name}.pth" ) if "sam_vit_b" in model_name: A_ : Any = SamConfig() elif "sam_vit_l" in model_name: A_ : Optional[Any] = SamVisionConfig( hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , ) A_ : int = SamConfig( vision_config=a_ , ) elif "sam_vit_h" in model_name: A_ : List[str] = SamVisionConfig( hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , ) A_ : Any = SamConfig( vision_config=a_ , ) A_ : Any = torch.load(a_ , map_location="""cpu""" ) A_ : int = replace_keys(a_ ) A_ : Optional[int] = SamImageProcessor() A_ : List[Any] = SamProcessor(image_processor=a_ ) A_ : Optional[Any] = SamModel(a_ ) hf_model.load_state_dict(a_ ) A_ : List[Any] = hf_model.to("""cuda""" ) A_ : str = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png""" A_ : Union[str, Any] = Image.open(requests.get(a_ , stream=a_ ).raw ).convert("""RGB""" ) A_ : int = [[[4_0_0, 6_5_0]]] A_ : Any = [[1]] A_ : int = processor(images=np.array(a_ ) , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): A_ : Dict = hf_model(**a_ ) A_ : int = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 
0.579890251159668 A_ : Union[str, Any] = processor( images=np.array(a_ ) , input_points=a_ , input_labels=a_ , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): A_ : Optional[int] = hf_model(**a_ ) A_ : Any = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712603092193604 A_ : List[str] = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),) A_ : List[Any] = processor(images=np.array(a_ ) , input_boxes=a_ , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): A_ : Tuple = hf_model(**a_ ) A_ : Tuple = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686015605926514 # Test with 2 points and 1 image. A_ : str = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]] A_ : Dict = [[1, 1]] A_ : int = processor( images=np.array(a_ ) , input_points=a_ , input_labels=a_ , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): A_ : List[Any] = hf_model(**a_ ) A_ : Dict = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936047792434692 if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() UpperCamelCase__ : Union[str, Any] = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action="https://huggingface.co/datasets/infinityofspace/python_codestyles-mixed1-500/viewer/default/store_true", help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) UpperCamelCase__ : Optional[int] = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
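# A short sketch of running the converted SAM model on a new image. The local
# path "./sam-converted" is a placeholder for --pytorch_dump_folder_path and
# the point prompt is arbitrary; the processor/model calls mirror the
# verification code above.
import numpy as np
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

processor = SamProcessor.from_pretrained("./sam-converted")
model = SamModel.from_pretrained("./sam-converted")

image = Image.open("car.png").convert("RGB")
inputs = processor(images=np.array(image), input_points=[[[400, 650]]], return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.iou_scores.squeeze())  # one IoU score per predicted mask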
'''simple docstring''' # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path UpperCamelCase__ : Optional[Any] = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) UpperCamelCase__ : Tuple = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} UpperCamelCase__ : Optional[Any] = 'zero2' UpperCamelCase__ : Optional[int] = 'zero3' UpperCamelCase__ : Dict = [ZEROa, ZEROa] def UpperCAmelCase ( a_ , a_ , a_ ) -> int: """simple docstring""" A_ : int = parameterized.to_safe_name("""_""".join(str(a_ ) for x in param.args ) ) return F"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test UpperCamelCase__ : Tuple = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class _lowerCAmelCase ( __A ): """simple docstring""" @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> Tuple: self.run_and_check( stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , ) @require_torch_multi_gpu @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: self.run_and_check( stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , ) @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> Dict: self.run_and_check( stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , ) @require_torch_multi_gpu @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase ) def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> int: self.run_and_check( stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , ) def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Optional[Any]: # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 10 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = True , ) -> List[str]: A_ : Union[str, Any] = models[model] A_ : Tuple = self.run_trainer( stage=_lowerCamelCase , model_name=_lowerCamelCase , eval_steps=_lowerCamelCase , num_train_epochs=1 , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , ) self.do_checks(_lowerCamelCase ) return output_dir def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 10 , _lowerCamelCase = 1 , _lowerCamelCase = True 
, _lowerCamelCase = True , ) -> Any: A_ : Dict = self.get_auto_remove_tmp_dir("""./xxx""" , after=_lowerCamelCase ) A_ : str = F"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(_lowerCamelCase )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split() if fpaa: args.extend(["""--fp16"""] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files A_ : List[str] = F"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split() A_ : Union[str, Any] = [F"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"] A_ : Tuple = self.get_launcher(_lowerCamelCase ) A_ : Optional[int] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_lowerCamelCase , env=self.get_env() ) return output_dir def UpperCAmelCase_ ( self , _lowerCamelCase=False ) -> Any: # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) A_ : int = min(2 , get_gpu_count() ) if distributed else 1 return F"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
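# For reference, each parameterized case above ultimately shells out to a
# command of roughly this shape (GPU count, paths, and the elided run_asr
# flags are illustrative, not literal values from the test):
#
#   deepspeed --num_nodes 1 --num_gpus 2 \
#       examples/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random \
#       ... \
#       --deepspeed ds_config_wav2vec2_zero2.json --fp16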
"""simple docstring""" from __future__ import annotations import time import numpy as np SCREAMING_SNAKE_CASE__ = [8, 5, 9, 7] SCREAMING_SNAKE_CASE__ = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] SCREAMING_SNAKE_CASE__ = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class lowerCAmelCase_ : """simple docstring""" def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): """simple docstring""" snake_case = claim_vector snake_case = allocated_resources_table snake_case = maximum_claim_table def snake_case ( self ): """simple docstring""" return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def snake_case ( self ): """simple docstring""" return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def snake_case ( self ): """simple docstring""" return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def snake_case ( self ): """simple docstring""" return {self.__need().index(__a ): i for i in self.__need()} def snake_case ( self , **lowerCAmelCase ): """simple docstring""" snake_case = self.__need() snake_case = self.__allocated_resources_table snake_case = self.__available_resources() snake_case = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: snake_case = False for each_need in need_list: snake_case = True for index, need in enumerate(__a ): if need > available_resources[index]: snake_case = False break if execution: snake_case = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: snake_case = original_need_index print(F"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(__a ) # update available/freed resources stack snake_case = np.array(__a ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(__a ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def snake_case ( self ): """simple docstring""" print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( F"""P{self.__allocated_resources_table.index(__a ) + 1}""" + ' '.join(F"""{it:>8}""" for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( F"""P{self.__maximum_claim_table.index(__a ) + 1}""" + ' '.join(F"""{it:>8}""" for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(__a ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(__a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase : def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ): '''simple docstring''' __a : Optional[Any] = parent __a : int = batch_size __a : Any = num_channels __a : Optional[int] = image_size __a : Dict = patch_size __a : int = is_training __a : Union[str, Any] = use_input_mask __a : Optional[int] = use_token_type_ids __a : Dict = use_labels __a : str = vocab_size __a : List[Any] = hidden_size __a : Union[str, Any] = num_hidden_layers __a : str = num_attention_heads __a : Union[str, Any] = intermediate_size __a : Any = hidden_act __a : List[str] = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : List[Any] = max_position_embeddings __a : Tuple = type_vocab_size __a : Any = type_sequence_label_size __a : Optional[int] = initializer_range __a : Any = coordinate_size __a : List[Any] = shape_size __a : Optional[int] = num_labels __a : Dict = num_choices __a : Union[str, Any] = scope __a : Union[str, Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __a : Optional[int] = text_seq_length __a : Any = (image_size // patch_size) ** 2 + 1 __a : Dict = self.text_seq_length + self.image_seq_length def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __a : Any = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __a : List[Any] = bbox[i, j, 3] __a : Tuple = bbox[i, j, 1] __a : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __a : int = bbox[i, j, 2] __a : Dict = bbox[i, j, 0] __a : int = tmp_coordinate __a : Optional[int] = tf.constant(__a ) __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : str = None if self.use_input_mask: __a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] ) __a : str = None if self.use_token_type_ids: __a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __a : Optional[Any] = None __a 
: Optional[int] = None if self.use_labels: __a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __a : int = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Dict = TFLayoutLMvaModel(config=__a ) # text + image __a : List[Any] = model(__a , pixel_values=__a , training=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , ) __a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __a : Any = model(__a , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __a : str = model({'pixel_values': pixel_values} , training=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : Any = self.num_labels __a : Dict = TFLayoutLMvaForSequenceClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : str = self.num_labels __a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a ) __a : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ): '''simple docstring''' __a : List[Any] = 2 __a : Any = TFLayoutLMvaForQuestionAnswering(config=__a ) __a : Any = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = self.prepare_config_and_inputs() ((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs __a : Any = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf 
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) A_ = ( {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) A_ = False A_ = False A_ = False def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ): '''simple docstring''' return True def __UpperCAmelCase ( self , __a , __a , __a=False ): '''simple docstring''' __a : str = copy.deepcopy(__a ) if model_class in get_values(__a ): __a : str = { k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__a , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__a ): __a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__a ): __a : Union[str, Any] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = TFLayoutLMvaModelTester(self ) __a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 ) def __UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): '''simple docstring''' __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Dict = model_class(__a ) if getattr(__a , 'hf_compute_loss' , __a ): # The number of elements in the loss should be the same as the number of elements in the label __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0] ] __a : Dict = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : Dict = prepared_for_class.pop('input_ids' ) __a : Tuple = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: __a : Union[str, Any] = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __a : List[Any] = -100 __a : List[str] = tf.convert_to_tensor(__a ) __a : Any = model(__a , **__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) __a : str = model(__a )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model 
correctly compute the loss with a tuple __a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a ) # Get keys that were added with the _prepare_for_class function __a : Dict = prepared_for_class.keys() - inputs_dict.keys() __a : Any = inspect.signature(model.call ).parameters __a : str = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __a : List[Any] = {0: 'input_ids'} for label_key in label_keys: __a : List[Any] = signature_names.index(__a ) __a : Union[str, Any] = label_key __a : List[str] = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __a : Union[str, Any] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __a : Optional[Any] = prepared_for_class[value] __a : str = tuple(__a ) # Send to model __a : Tuple = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __a : Any = type self.model_tester.create_and_check_model(__a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __a , __a , __a , __a , __a , __a , __a ) def __UpperCAmelCase ( self ): '''simple docstring''' ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __a , __a , __a , __a , __a , __a , __a ) @slow def __UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase (): __a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class __UpperCamelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None @slow def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) __a : Tuple = self.default_image_processor __a : List[Any] = prepare_img() __a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values __a : Union[str, Any] = tf.constant([[1, 2]] ) __a : Optional[Any] = 
tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a ) # verify the logits __a : List[Any] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , __a ) __a : Optional[Any] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
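# A condensed, standalone version of the integration check above. The
# checkpoint and the toy input ids/bboxes are the same ones the test uses;
# LayoutLMv3ImageProcessor / TFLayoutLMv3Model are the public names the
# obfuscated identifiers stand for.
import tensorflow as tf
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, TFLayoutLMv3Model

model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
input_ids = tf.constant([[1, 2]])
bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
print(outputs.last_hidden_state.shape)  # (1, 199, 768)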
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case__ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = XLMRobertaTokenizer _lowerCAmelCase = XLMRobertaTokenizerFast _lowerCAmelCase = True _lowerCAmelCase = True def _a ( self : str ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing A_ : int = XLMRobertaTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = '''<pad>''' A_ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def _a ( self : Any ): """simple docstring""" A_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_lowerCamelCase ) , 1002 ) def _a ( self : List[Any] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002 ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = XLMRobertaTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) A_ : Dict = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) A_ : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) A_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) A_ : str = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def _a ( self : Optional[Any] ): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return A_ : Dict = 
(self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): A_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) A_ : List[str] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) A_ : Optional[int] = tempfile.mkdtemp() A_ : List[Any] = tokenizer_r.save_pretrained(_lowerCamelCase ) A_ : List[Any] = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) A_ : List[str] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase ) # Checks everything loads correctly in the same way A_ : Union[str, Any] = tokenizer_r.from_pretrained(_lowerCamelCase ) A_ : List[Any] = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowerCamelCase ) # Save tokenizer rust, legacy_format=True A_ : Optional[Any] = tempfile.mkdtemp() A_ : Tuple = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase ) A_ : Tuple = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase ) # Checks everything loads correctly in the same way A_ : Any = tokenizer_r.from_pretrained(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) ) shutil.rmtree(_lowerCamelCase ) # Save tokenizer rust, legacy_format=False A_ : Tuple = tempfile.mkdtemp() A_ : Optional[Any] = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase ) A_ : Optional[int] = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A_ : Optional[Any] = tokenizer_r.from_pretrained(_lowerCamelCase ) A_ : List[str] = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) ) shutil.rmtree(_lowerCamelCase ) @cached_property def _a ( self : Tuple ): """simple docstring""" return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def _a ( self : Tuple ): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_lowerCamelCase , f.name ) A_ : int = XLMRobertaTokenizer(f.name , keep_accents=_lowerCamelCase ) A_ : Tuple = pickle.dumps(_lowerCamelCase ) pickle.loads(_lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" if not self.test_rust_tokenizer: return A_ : Union[str, Any] = self.get_tokenizer() A_ : List[Any] = self.get_rust_tokenizer() A_ : Union[str, Any] = '''I was born in 92000, and this is falsé.''' A_ : Any = 
tokenizer.tokenize(_lowerCamelCase ) A_ : List[str] = rust_tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Any = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) A_ : Tuple = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = self.get_rust_tokenizer() A_ : Any = tokenizer.encode(_lowerCamelCase ) A_ : Tuple = rust_tokenizer.encode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) @slow def _a ( self : List[Any] ): """simple docstring""" A_ : Tuple = '''Hello World!''' A_ : Optional[Any] = [0, 35378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def _a ( self : Tuple ): """simple docstring""" A_ : Dict = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) A_ : Dict = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def _a ( self : Optional[int] ): """simple docstring""" A_ : Optional[Any] = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
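# A compact illustration of the encode round-trip the slow tests above assert
# on, using the public checkpoint instead of the SentencePiece fixture (the
# expected ids are the ones hard-coded in the test):
from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = tokenizer.encode("Hello World!")
print(ids)                    # [0, 35378, 6661, 38, 2]
print(tokenizer.decode(ids))  # "<s> Hello World!</s>"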
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : 
List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
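# A minimal sketch of the producer/consumer pattern these tests exercise:
# generation runs in a background thread while the main thread iterates over
# decoded text chunks (same tiny test checkpoint as above).
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()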
"""Check whether an integer is a perfect cube."""


def perfect_cube(n: int) -> bool:
    # Round the floating-point cube root before comparing: ``27 ** (1 / 3)``
    # evaluates to 3.0000000000000004, so an exact float comparison would
    # wrongly reject true cubes.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))   # False
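# Why the rounding above matters: the naive float comparison fails even on an
# exact cube because of floating-point representation error.
print(27 ** (1 / 3))                    # 3.0000000000000004
print((27 ** (1 / 3)) ** 3 == 27)       # False -- the unrounded check
print(round(27 ** (1 / 3)) ** 3 == 27)  # True  -- the check used above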
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = """convbert""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=3_05_22 , SCREAMING_SNAKE_CASE_ : int=7_68 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : Dict=30_72 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : List[Any]=7_68 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Any=9 , SCREAMING_SNAKE_CASE_ : Tuple=1 , SCREAMING_SNAKE_CASE_ : List[Any]=None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Dict = vocab_size A: Tuple = hidden_size A: Optional[int] = num_hidden_layers A: List[str] = num_attention_heads A: int = intermediate_size A: int = hidden_act A: List[str] = hidden_dropout_prob A: int = attention_probs_dropout_prob A: Tuple = max_position_embeddings A: Any = type_vocab_size A: str = initializer_range A: Union[str, Any] = layer_norm_eps A: str = embedding_size A: Optional[int] = head_ratio A: List[Any] = conv_kernel_size A: List[Any] = num_groups A: Optional[int] = classifier_dropout class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' @property def _snake_case ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": A: Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A: List[str] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
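# A short sketch of instantiating the config above. The keyword values shown
# equal the defaults declared in __init__, so this is equivalent to
# ConvBertConfig(); ConvBertModel is the public model class that consumes it.
from transformers import ConvBertConfig, ConvBertModel

config = ConvBertConfig(hidden_size=768, conv_kernel_size=9, head_ratio=2, num_groups=1)
model = ConvBertModel(config)
print(config.model_type)  # "convbert"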
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer lowerCAmelCase_ : str = ['gpt2'] lowerCAmelCase_ : Any = 'gpt2' if is_tf_available(): class __SCREAMING_SNAKE_CASE (tf.Module ): """simple docstring""" def __init__( self : Optional[int] , __a : Any ): super().__init__() _a = tokenizer _a = AutoConfig.from_pretrained(__a ) _a = TFGPTaLMHeadModel.from_config(__a ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def UpperCamelCase__ ( self : Optional[Any] , __a : Union[str, Any] ): _a = self.tokenizer(__a ) _a = tokenized["input_ids"].to_tensor() _a = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) _a = self.model(input_ids=__a , attention_mask=__a )["logits"] return outputs @require_tf @require_keras_nlp class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Union[str, Any] ): super().setUp() _a = [GPTaTokenizer.from_pretrained(__a ) for checkpoint in (TOKENIZER_CHECKPOINTS)] _a = [TFGPTaTokenizer.from_pretrained(__a ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) _a = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] _a = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def UpperCamelCase__ ( self : List[str] ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: _a = tokenizer([test_inputs] , return_tensors="tf" ) _a = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors _a = python_outputs[key].numpy() _a = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(__a , tf.intaa ) == tf_outputs_values ) ) @slow def UpperCamelCase__ ( self : Dict ): for tf_tokenizer in self.tf_tokenizers: _a = tf.function(__a ) for test_inputs in self.test_sentences: _a = tf.constant(__a ) _a = compiled_tokenizer(__a ) _a = tf_tokenizer(__a ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def UpperCamelCase__ ( self : str ): for tf_tokenizer in self.tf_tokenizers: _a = ModelToSave(tokenizer=__a ) _a = tf.convert_to_tensor([self.test_sentences[0]] ) _a = model.serving(__a ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _a = Path(__a ) / "saved.model" tf.saved_model.save(__a , __a , signatures={"serving_default": model.serving} ) _a = tf.saved_model.load(__a ) _a = loaded_model.signatures["serving_default"](__a )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == 
loaded_output ) ) @slow def UpperCamelCase__ ( self : List[str] ): for tf_tokenizer in self.tf_tokenizers: _a = tf.convert_to_tensor([self.test_sentences[0]] ) _a = tf_tokenizer(__a ) # Build model with some sample inputs _a = tf_tokenizer.get_config() _a = TFGPTaTokenizer.from_config(__a ) _a = model_from_config(__a ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def UpperCamelCase__ ( self : Tuple ): for tf_tokenizer in self.tf_tokenizers: # for the test to run _a = 12_31_23 for max_length in [3, 5, 10_24]: _a = tf.convert_to_tensor([self.test_sentences[0]] ) _a = tf_tokenizer(__a , max_length=__a ) _a = out["input_ids"].numpy().shape[1] assert out_length == max_length
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance i to the next divisor of n; it is necessarily prime, because all
        # smaller prime factors have already been divided out below.
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
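# Worked example (illustrative addition): 13195 = 5 * 7 * 13 * 29, the example from the
# Project Euler statement, so its largest prime factor is 29; primes return themselves.
#
#     solution(13195)  # -> 29
#     solution(17)     # -> 17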
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The dump assigned this list to a throwaway name; it belongs in _import_structure.
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
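# Behavior note (illustrative, assuming standard transformers _LazyModule semantics):
# importing the package is cheap; the heavy torch-backed submodule only loads on first
# attribute access.
#
#     import transformers.models.pegasus_x as pegasus_x  # no torch-side import yet
#     cfg_cls = pegasus_x.PegasusXConfig                  # triggers the real import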
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# The dump assigned the bare string "3" to a throwaway name; restoring it as the usual
# TensorFlow verbosity setting is an assumption, based on the otherwise-unused `os` import.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
from math import pi, sqrt


def gamma(num: float) -> float:
    """Recursively compute Gamma(num) for positive integers and half-integers."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)  # Gamma(1/2) = sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num:  # guard so entering 0 exits instead of raising ValueError
            print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
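# Cross-check sketch (illustrative addition): for positive arguments the recursion above
# agrees with the standard library, which can serve as a reference implementation.
#
#     import math
#     all(abs(gamma(x) - math.gamma(x)) < 1e-9 for x in (0.5, 1, 2, 3.5, 6))  # -> True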
import math


def res(x: int, y: int) -> float:
    """Return log10(x ** y), a proxy for comparing huge powers without computing them."""
    if 0 not in (x, y):
        # log10(x ** y) == y * log10(x), so enormous powers reduce to small floats.
        return y * math.log10(x)
    if x == 0:
        # 0 raised to any positive power is 0; its log diverges to -infinity.
        # (The dumped version returned the raw value 0 here, which would compare
        # incorrectly against genuine log values.)
        return -math.inf
    if y == 0:
        return 0.0  # x ** 0 == 1 and log10(1) == 0
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Read two (base, power) pairs and report which power is larger.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # Compare the logarithms of the two powers instead of the powers themselves.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
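# Worked example (illustrative addition): for 2^10 versus 10^2,
# res(2, 10) = 10 * log10(2) ~= 3.0103 and res(10, 2) = 2 * log10(10) = 2,
# so the comparison correctly picks 2^10 (1024) over 10^2 (100).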
"""simple docstring""" from __future__ import annotations __magic_name__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] __magic_name__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) for i in range(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = -1 for j in range(i + 1 , UpperCamelCase_ ): if arr[i] < arr[j]: __SCREAMING_SNAKE_CASE = arr[j] break result.append(UpperCamelCase_ ) return result def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = [] for i, outer in enumerate(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = -1 for inner in arr[i + 1 :]: if outer < inner: __SCREAMING_SNAKE_CASE = inner break result.append(UpperCamelCase_ ) return result def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [-1] * arr_size for index in reversed(range(UpperCamelCase_ ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: __SCREAMING_SNAKE_CASE = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) __magic_name__ = ( "from __main__ import arr, next_greatest_element_slow, " "next_greatest_element_fast, next_greatest_element" ) print( "next_greatest_element_slow():", timeit("next_greatest_element_slow(arr)", setup=setup), ) print( "next_greatest_element_fast():", timeit("next_greatest_element_fast(arr)", setup=setup), ) print( " next_greatest_element():", timeit("next_greatest_element(arr)", setup=setup), )
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __magic_name__ = logging.get_logger(__name__) __magic_name__ = { "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json", } class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Optional[Any] = '''blip_2_vision_model''' def __init__( self , lowerCAmelCase__=1_4_0_8 , lowerCAmelCase__=6_1_4_4 , lowerCAmelCase__=3_9 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=1_4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0_00_01 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , **lowerCAmelCase__ , ): super().__init__(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = qkv_bias @classmethod def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__): cls._set_token_in_kwargs(lowerCAmelCase__) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__) # get the vision config dict if we are loading from Blip2Config if config_dict.get("""model_type""") == "blip-2": __SCREAMING_SNAKE_CASE = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__) class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Tuple = '''blip_2_qformer''' def __init__( self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=2 , lowerCAmelCase__=1_4_0_8 , **lowerCAmelCase__ , ): super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = cross_attention_frequency __SCREAMING_SNAKE_CASE = encoder_hidden_size @classmethod def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__): cls._set_token_in_kwargs(lowerCAmelCase__) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("""model_type""") == "blip-2": __SCREAMING_SNAKE_CASE = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__) class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Optional[Any] = '''blip-2''' __lowercase : Any = True def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=3_2 , **lowerCAmelCase__): super().__init__(**lowerCAmelCase__) if vision_config is None: __SCREAMING_SNAKE_CASE = {} logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""") if qformer_config is None: __SCREAMING_SNAKE_CASE = {} logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""") if text_config is None: __SCREAMING_SNAKE_CASE = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""") __SCREAMING_SNAKE_CASE = BlipaVisionConfig(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = BlipaQFormerConfig(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = text_config["""model_type"""] if """model_type""" in text_config else """opt""" __SCREAMING_SNAKE_CASE = CONFIG_MAPPING[text_model_type](**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = self.text_config.tie_word_embeddings __SCREAMING_SNAKE_CASE = self.text_config.is_encoder_decoder __SCREAMING_SNAKE_CASE = num_query_tokens __SCREAMING_SNAKE_CASE = self.vision_config.hidden_size __SCREAMING_SNAKE_CASE = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __SCREAMING_SNAKE_CASE = 1.0 __SCREAMING_SNAKE_CASE = 0.02 @classmethod def snake_case_ ( cls , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , ) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__) __SCREAMING_SNAKE_CASE = self.vision_config.to_dict() __SCREAMING_SNAKE_CASE = self.qformer_config.to_dict() __SCREAMING_SNAKE_CASE = self.text_config.to_dict() __SCREAMING_SNAKE_CASE = self.__class__.model_type return output
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( A_, unittest.TestCase ): lowercase__ = SpeechTaTokenizer lowercase__ = False lowercase__ = True def __magic_name__ ( self : Dict ) -> Optional[int]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = SpeechTaTokenizer(snake_case_ ) A__ = AddedToken("<mask>" , lstrip=snake_case_ , rstrip=snake_case_ ) A__ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) tokenizer.save_pretrained(self.tmpdirname ) def __magic_name__ ( self : Optional[Any] , snake_case_ : List[str] ) -> Tuple: '''simple docstring''' A__ = "this is a test" A__ = "this is a test" return input_text, output_text def __magic_name__ ( self : Dict , snake_case_ : Dict , snake_case_ : Any=False , snake_case_ : int=20 , snake_case_ : Dict=5 ) -> Optional[int]: '''simple docstring''' A__, A__ = self.get_input_output_texts(snake_case_ ) A__ = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) A__ = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ ) return text, ids def __magic_name__ ( self : Dict ) -> List[str]: '''simple docstring''' A__ = "<pad>" A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ ) def __magic_name__ ( self : Dict ) -> Dict: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-4] , "œ" ) self.assertEqual(vocab_keys[-2] , "<mask>" ) self.assertEqual(vocab_keys[-1] , "<ctc_blank>" ) self.assertEqual(len(snake_case_ ) , 81 ) def __magic_name__ ( self : List[str] ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def __magic_name__ ( self : List[str] ) -> Any: '''simple docstring''' A__ = self.get_tokenizers(do_lower_case=snake_case_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): A__ = tokenizer.vocab_size A__ = len(snake_case_ ) self.assertNotEqual(snake_case_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] A__ = tokenizer.add_tokens(snake_case_ ) A__ = tokenizer.vocab_size A__ = len(snake_case_ ) self.assertNotEqual(snake_case_ , 0 ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , len(snake_case_ ) ) self.assertEqual(snake_case_ , all_size + len(snake_case_ ) ) A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=snake_case_ ) self.assertGreaterEqual(len(snake_case_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} A__ = 
tokenizer.add_special_tokens(snake_case_ ) A__ = tokenizer.vocab_size A__ = len(snake_case_ ) self.assertNotEqual(snake_case_ , 0 ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , len(snake_case_ ) ) self.assertEqual(snake_case_ , all_size_a + len(snake_case_ ) ) A__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=snake_case_ ) self.assertGreaterEqual(len(snake_case_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def __magic_name__ ( self : Tuple ) -> int: '''simple docstring''' pass def __magic_name__ ( self : int ) -> Union[str, Any]: '''simple docstring''' pass def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer.tokenize("This is a test" ) # fmt: off self.assertListEqual(snake_case_ , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) A__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( snake_case_ , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) A__ = tokenizer.convert_tokens_to_ids(snake_case_ ) # fmt: off self.assertListEqual(snake_case_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on A__ = tokenizer.convert_ids_to_tokens(snake_case_ ) self.assertListEqual( snake_case_ , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] ) @slow def __magic_name__ ( self : Union[str, Any] ) -> str: '''simple docstring''' A__ = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off A__ = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case_ , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=snake_case_ , )
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCAmelCase_ ( A_ ): lowercase__ = ['''image_processor''', '''tokenizer'''] lowercase__ = '''AutoImageProcessor''' lowercase__ = '''AutoTokenizer''' def __init__( self : str , snake_case_ : Dict , snake_case_ : List[str] ) -> str: '''simple docstring''' super().__init__(snake_case_ , snake_case_ ) A__ = self.image_processor def __call__( self : int , snake_case_ : Any=None , snake_case_ : Any=None , snake_case_ : Union[str, Any]=None , **snake_case_ : Optional[int] ) -> Optional[int]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: A__ = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ ) if images is not None: A__ = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ ) if text is not None and images is not None: A__ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ ) def __magic_name__ ( self : Optional[int] , *snake_case_ : Union[str, Any] , **snake_case_ : List[Any] ) -> int: '''simple docstring''' return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ ) def __magic_name__ ( self : List[str] , *snake_case_ : List[str] , **snake_case_ : Optional[int] ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*snake_case_ , **snake_case_ ) @property def __magic_name__ ( self : List[Any] ) -> List[Any]: '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(k) for 2 <= k <= limit via a prime sieve."""
    # Seed phi[i] with i - 1, which is already correct when i is prime; whenever
    # phi[i] == i - 1 still holds at row i, i must be prime, and each multiple j then
    # loses its 1/i share of candidates, mirroring phi(n) = n * prod(1 - 1/p).
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
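# Small sanity check (illustrative addition, not in the dumped file): phi(2..8) is
# 1, 2, 2, 4, 2, 6, 4, summing to 21. Wrapped in a function to keep imports side-effect free.
def _check_small_limits() -> None:
    assert solution(8) == 21
    assert solution(2) == 1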
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    # Class and method names reconstructed from context; the dump used placeholder identifiers.
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
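# Minimal concrete subcommand (an illustrative sketch under the reconstructed names
# above; the "env" command and its behavior are hypothetical, not taken from the dump):
class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(subparsers):
        # `subparsers` is the object returned by ArgumentParser.add_subparsers().
        env_parser = subparsers.add_parser("env", help="Print basic environment info.")
        env_parser.set_defaults(func=lambda args: EnvCommand().run())

    def run(self):
        import sys

        print("Python:", sys.version)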
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : List[str] = { "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = ["AlbertTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ["AlbertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = [ "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "AlbertForMaskedLM", "AlbertForMultipleChoice", "AlbertForPreTraining", "AlbertForQuestionAnswering", "AlbertForSequenceClassification", "AlbertForTokenClassification", "AlbertModel", "AlbertPreTrainedModel", "load_tf_weights_in_albert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : int = [ "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAlbertForMaskedLM", "TFAlbertForMultipleChoice", "TFAlbertForPreTraining", "TFAlbertForQuestionAnswering", "TFAlbertForSequenceClassification", "TFAlbertForTokenClassification", "TFAlbertMainLayer", "TFAlbertModel", "TFAlbertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = [ "FlaxAlbertForMaskedLM", "FlaxAlbertForMultipleChoice", "FlaxAlbertForPreTraining", "FlaxAlbertForQuestionAnswering", "FlaxAlbertForSequenceClassification", "FlaxAlbertForTokenClassification", "FlaxAlbertModel", "FlaxAlbertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, 
FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __A : List[Any] = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad/truncate each sequence to `sequence_length`, filling with `padding_value`.

    A tuple `padding_value` means each element is itself a pair (e.g. an entity span),
    so the output gains a trailing dimension of 2. The assignment targets inside the
    loop were lost in the dump and are reconstructed from this intent.
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        tensor = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor), :2] = tensor
            else:
                out_tensor[i, : len(tensor)] = tensor
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor) :, :2] = tensor
            else:
                out_tensor[i, sequence_length - len(tensor) :] = tensor

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    # Class name reconstructed from the LUKE-style fields (entity_ids, ner_tags, entity spans).
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors would fail while labels still have ragged lengths.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
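# Shape demonstration for padding_tensor (illustrative addition, relying on the
# reconstruction above): scalar tags pad to (batch, sequence_length), while
# (start, end) span pairs pad to (batch, sequence_length, 2).
def _demo_padding_tensor() -> None:
    assert padding_tensor([[1, 2], [3]], -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]
    assert padding_tensor([[(0, 2)]], (-1, -1), "right", 2) == [[[0, 2], [-1, -1]]]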
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' return [ord(SCREAMING_SNAKE_CASE ) - 96 for elem in plain] def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def UpperCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase = encode(input("""-> """ ).strip().lower() ) print("""Encoded: """ , SCREAMING_SNAKE_CASE ) print("""Decoded:""" , decode(SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": main()
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _UpperCAmelCase ( _UpperCamelCase : Optional[int], _UpperCamelCase : Tuple ) -> Dict: assert isinstance(_UpperCamelCase, _UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def _UpperCAmelCase ( _UpperCamelCase : Any, _UpperCamelCase : int, _UpperCamelCase : List[str] ) -> Dict: A_ = tmp_path / '''cache''' A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ = JsonDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase, keep_in_memory=_UpperCamelCase ).read() _check_json_dataset(_UpperCamelCase, _UpperCamelCase ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def _UpperCAmelCase ( _UpperCamelCase : Dict, _UpperCamelCase : Optional[int], _UpperCamelCase : int ) -> Dict: A_ = tmp_path / '''cache''' A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} A_ = features.copy() if features else default_expected_features A_ = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ = JsonDatasetReader(_UpperCamelCase, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read() _check_json_dataset(_UpperCamelCase, _UpperCamelCase ) @pytest.mark.parametrize( '''features''', [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ], ) def _UpperCAmelCase ( _UpperCamelCase : Dict, _UpperCamelCase : Dict, _UpperCamelCase : Optional[int] ) -> List[str]: A_ = tmp_path / '''cache''' A_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} A_ = features.copy() if features else default_expected_features A_ = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ = JsonDatasetReader(_UpperCamelCase, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read() assert isinstance(_UpperCamelCase, _UpperCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def _UpperCAmelCase ( _UpperCamelCase : List[Any], _UpperCamelCase : Union[str, Any] ) -> Union[str, Any]: # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} A_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} A_ = features.copy() A_ = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None 
else None ) A_ = tmp_path / '''cache''' A_ = JsonDatasetReader(_UpperCamelCase, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read() assert isinstance(_UpperCamelCase, _UpperCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Union[str, Any], _UpperCamelCase : Optional[Any] ) -> str: A_ = tmp_path / '''cache''' A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} A_ = JsonDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase, split=_UpperCamelCase ).read() _check_json_dataset(_UpperCamelCase, _UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''', [str, list] ) def _UpperCAmelCase ( _UpperCamelCase : int, _UpperCamelCase : Union[str, Any], _UpperCamelCase : str ) -> Union[str, Any]: if issubclass(_UpperCamelCase, _UpperCamelCase ): A_ = jsonl_path elif issubclass(_UpperCamelCase, _UpperCamelCase ): A_ = [jsonl_path] A_ = tmp_path / '''cache''' A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} A_ = JsonDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase ).read() _check_json_dataset(_UpperCamelCase, _UpperCamelCase ) def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Union[str, Any], _UpperCamelCase : str=("train",) ) -> int: assert isinstance(_UpperCamelCase, _UpperCamelCase ) for split in splits: A_ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def _UpperCAmelCase ( _UpperCamelCase : List[str], _UpperCamelCase : Union[str, Any], _UpperCamelCase : int ) -> Union[str, Any]: A_ = tmp_path / '''cache''' A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A_ = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=_UpperCamelCase, keep_in_memory=_UpperCamelCase ).read() _check_json_datasetdict(_UpperCamelCase, _UpperCamelCase ) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : int, _UpperCamelCase : Optional[Any] ) -> str: A_ = tmp_path / '''cache''' A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} A_ = features.copy() if features else default_expected_features A_ = ( Features({feature: Value(_UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) A_ = JsonDatasetReader({'''train''': jsonl_path}, features=_UpperCamelCase, cache_dir=_UpperCamelCase ).read() _check_json_datasetdict(_UpperCamelCase, _UpperCamelCase ) 
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : List[str], _UpperCamelCase : Optional[int] ) -> Tuple: if split: A_ = {split: jsonl_path} else: A_ = '''train''' A_ = {'''train''': jsonl_path, '''test''': jsonl_path} A_ = tmp_path / '''cache''' A_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} A_ = JsonDatasetReader(_UpperCamelCase, cache_dir=_UpperCamelCase ).read() _check_json_datasetdict(_UpperCamelCase, _UpperCamelCase, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def _UpperCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[Any]: return json.load(_UpperCamelCase ) def _UpperCAmelCase ( _UpperCamelCase : Dict ) -> Tuple: return [json.loads(_UpperCamelCase ) for line in buffer] class __UpperCAmelCase : '''simple docstring''' @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE ).write() buffer.seek(0 ) A_ = load_json_function(_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert isinstance(exported_content[0] , _SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , orient=_SCREAMING_SNAKE_CASE ).write() buffer.seek(0 ) A_ = load_json(_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_SCREAMING_SNAKE_CASE , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_SCREAMING_SNAKE_CASE ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , num_proc=2 ).write() buffer.seek(0 ) A_ = load_json_function(_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert isinstance(exported_content[0] , _SCREAMING_SNAKE_CASE ) assert len(_SCREAMING_SNAKE_CASE ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, 
'''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , orient=_SCREAMING_SNAKE_CASE , num_proc=2 ).write() buffer.seek(0 ) A_ = load_json(_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_SCREAMING_SNAKE_CASE , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(_SCREAMING_SNAKE_CASE ) == 10 def __A ( self , _SCREAMING_SNAKE_CASE ) -> str: with pytest.raises(_SCREAMING_SNAKE_CASE ): with io.BytesIO() as buffer: JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: A_ = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}''' A_ = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compression=_SCREAMING_SNAKE_CASE ).write() with fsspec.open(_SCREAMING_SNAKE_CASE , '''rb''' , compression='''infer''' ) as f: A_ = f.read() with fsspec.open(_SCREAMING_SNAKE_CASE , '''rb''' , compression='''infer''' ) as f: A_ = f.read() assert exported_content == original_content
'''simple docstring''' import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel __snake_case : str = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __A ( cls ) -> Dict: A_ = TOKEN HfFolder.save_token(_SCREAMING_SNAKE_CASE ) @classmethod def __A ( cls ) -> Optional[int]: try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __A ( self ) -> str: A_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) def __A ( self ) -> List[str]: A_ = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) A_ = flatten_dict(unfreeze(model.params ) ) A_ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A_ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' ) def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : Tuple ) -> Dict: A_ = True 
A_ = flatten_dict(modela.params ) A_ = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: A_ = False return models_are_equal @require_flax class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __A ( self ) -> List[str]: A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) A_ = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __A ( self ) -> List[Any]: A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE ) A_ = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' ) with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __A ( self ) -> Dict: A_ = '''bert''' A_ = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def __A ( self ) -> Optional[Any]: A_ = '''bert''' A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_SCREAMING_SNAKE_CASE ): A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = {'vocab_file': 'sentencepiece.model'} UpperCAmelCase_ : str = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } UpperCAmelCase_ : Union[str, Any] = { 'google/rembert': 256, } class lowercase__ ( _snake_case ): '''simple docstring''' A_ : List[str] = VOCAB_FILES_NAMES A_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP A_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __snake_case , __snake_case=False , __snake_case=True , __snake_case=True , __snake_case="[CLS]" , __snake_case="[SEP]" , __snake_case="[UNK]" , __snake_case="[SEP]" , __snake_case="[PAD]" , __snake_case="[CLS]" , __snake_case="[MASK]" , **__snake_case , ): super().__init__( do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , ) _SCREAMING_SNAKE_CASE : List[str] = do_lower_case _SCREAMING_SNAKE_CASE : Optional[int] = remove_space _SCREAMING_SNAKE_CASE : List[str] = keep_accents _SCREAMING_SNAKE_CASE : Optional[int] = vocab_file _SCREAMING_SNAKE_CASE : List[str] = spm.SentencePieceProcessor() self.sp_model.Load(__snake_case ) @property def UpperCAmelCase_ ( self ): return len(self.sp_model ) def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : int = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): _SCREAMING_SNAKE_CASE : List[str] = self.__dict__.copy() _SCREAMING_SNAKE_CASE : Any = None return state def __setstate__( self , __snake_case ): _SCREAMING_SNAKE_CASE : List[str] = d _SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def UpperCAmelCase_ ( self , __snake_case , __snake_case=False ): _SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.EncodeAsPieces(__snake_case ) return pieces def UpperCAmelCase_ ( self , __snake_case ): return self.sp_model.PieceToId(__snake_case ) def UpperCAmelCase_ ( self , __snake_case ): return self.sp_model.IdToPiece(__snake_case ) def UpperCAmelCase_ ( self , __snake_case ): _SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.decode_pieces(__snake_case ) return out_string def UpperCAmelCase_ ( self , __snake_case , __snake_case = None ): _SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] _SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase_ ( self , __snake_case , __snake_case = None , __snake_case = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1] return [1] + ([0] * len(__snake_case )) + [1] def UpperCAmelCase_ ( self , __snake_case 
, __snake_case = None ): _SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id] _SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self , __snake_case , __snake_case = None ): if not os.path.isdir(__snake_case ): logger.error("""Vocabulary path ({}) should be a directory""".format(__snake_case ) ) return _SCREAMING_SNAKE_CASE : Tuple = os.path.join( __snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ): copyfile(self.vocab_file , __snake_case ) return (out_vocab_file,)
def optimal_merge_pattern(files: list) -> float:
    """Find the optimal cost of merging all the files, two at a time."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
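# Quick usage sketch for optimal_merge_pattern above (the sizes are made-up
# examples, not from the original module). Note the function mutates its input
# list in place. Merging [2, 3, 4] greedily costs (2 + 3) + (5 + 4) = 14.
assert optimal_merge_pattern([2, 3, 4]) == 14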
import torch


def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
from manim import * class lowercase__ ( __lowerCamelCase ): '''simple docstring''' def UpperCamelCase__ ( self ) -> int: """simple docstring""" UpperCamelCase__ : int = Rectangle(height=0.5, width=0.5 ) UpperCamelCase__ : Optional[int] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 ) UpperCamelCase__ : Dict = [mem.copy() for i in range(6 )] UpperCamelCase__ : Any = [mem.copy() for i in range(6 )] UpperCamelCase__ : int = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Tuple = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : int = VGroup(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Optional[int] = Text('''CPU''', font_size=24 ) UpperCamelCase__ : Any = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__magic_name__ ) UpperCamelCase__ : Any = [mem.copy() for i in range(1 )] UpperCamelCase__ : Optional[int] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Union[str, Any] = Text('''GPU''', font_size=24 ) UpperCamelCase__ : List[Any] = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ ) gpu.align_to(__magic_name__, __magic_name__ ) gpu.set_x(gpu.get_x() - 1 ) self.add(__magic_name__ ) UpperCamelCase__ : str = [mem.copy() for i in range(6 )] UpperCamelCase__ : Optional[int] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 ) UpperCamelCase__ : Optional[int] = Text('''Model''', font_size=24 ) UpperCamelCase__ : int = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ ) model.move_to([3, -1.0, 0] ) self.play( Create(__magic_name__, run_time=1 ), Create(__magic_name__, run_time=1 ), Create(__magic_name__, run_time=1 ), ) UpperCamelCase__ : Optional[int] = MarkupText( f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24, ) UpperCamelCase__ : List[str] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCamelCase__ : Union[str, Any] = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(__magic_name__, run_time=2.5 ), Write(__magic_name__ ), Write(__magic_name__ ) ) self.add(__magic_name__ ) UpperCamelCase__ : Dict = [] UpperCamelCase__ : Any = [] UpperCamelCase__ : int = [] for i, rect in enumerate(__magic_name__ ): UpperCamelCase__ : Union[str, Any] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(__magic_name__, opacity=0.7 ) cpu_target.move_to(__magic_name__ ) cpu_target.generate_target() UpperCamelCase__ : Tuple = 0.46 / 4 UpperCamelCase__ : Optional[Any] = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=__magic_name__ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target, direction=__magic_name__, buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target, direction=__magic_name__, buff=0.0 ) cpu_targs.append(__magic_name__ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__magic_name__ ) ) second_animations.append(MoveToTarget(__magic_name__, run_time=1.5 ) ) self.play(*__magic_name__ ) self.play(*__magic_name__ ) self.wait()
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
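# Sanity check using the classic small case from Project Euler problem 3:
# 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
assert solution(13195) == 29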
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR outputs 1 only when both inputs are equal."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
"""simple docstring""" from __future__ import annotations lowercase__ : Dict = 1_0 def UpperCamelCase_ ( lowerCAmelCase__ : list[int] ) -> list[int]: """simple docstring""" lowerCAmelCase_ : str = 1 lowerCAmelCase_ : Optional[Any] = max(lowerCAmelCase__ ) while placement <= max_digit: # declare and initialize empty buckets lowerCAmelCase_ : list[list] = [[] for _ in range(lowerCAmelCase__ )] # split list_of_ints between the buckets for i in list_of_ints: lowerCAmelCase_ : Union[str, Any] = int((i / placement) % RADIX ) buckets[tmp].append(lowerCAmelCase__ ) # put each buckets' contents into list_of_ints lowerCAmelCase_ : List[str] = 0 for b in range(lowerCAmelCase__ ): for i in buckets[b]: lowerCAmelCase_ : Optional[int] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class UpperCamelCase__ : """simple docstring""" pass
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets lowercase_ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" lowercase_ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n" lowercase_ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): """simple docstring""" def snake_case__ ( self : Optional[int] )-> List[str]: '''simple docstring''' if version.parse(scb.__version__ ) < version.parse('1.4.12' ): raise ImportWarning( 'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n' 'You can install it with `pip install "sacrebleu>=1.4.12"`.' 
) return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,homepage='http://www.cs.umd.edu/~snover/tercom/',inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { 'predictions': datasets.Value('string',id='sequence' ), 'references': datasets.Sequence(datasets.Value('string',id='sequence' ),id='references' ), } ),codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'],reference_urls=[ 'https://github.com/jhclark/tercom', ],) def snake_case__ ( self : int,lowercase_ : Any,lowercase_ : Optional[Any],lowercase_ : bool = False,lowercase_ : bool = False,lowercase_ : bool = False,lowercase_ : bool = False,)-> Optional[Any]: '''simple docstring''' A__ = len(references[0] ) if any(len(lowercase_ ) != references_per_prediction for refs in references ): raise ValueError('Sacrebleu requires the same number of references for each prediction' ) A__ = [[refs[i] for refs in references] for i in range(lowercase_ )] A__ = TER( normalized=lowercase_,no_punct=lowercase_,asian_support=lowercase_,case_sensitive=lowercase_,) A__ = sb_ter.corpus_score(lowercase_,lowercase_ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
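# Standalone sketch of the reference transposition the compute method above
# performs before calling sacrebleu: per-prediction reference lists become
# per-index reference streams, the layout TER.corpus_score expects.
# (Example strings are taken from the docstring above.)
references = [
    ["does this sentence match", "does this sentence match!?!"],
    ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
]
references_per_prediction = len(references[0])
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
assert transformed_references == [
    ["does this sentence match", "wHaT aBoUt ThIs SeNtEnCe?"],
    ["does this sentence match!?!", "wHaT aBoUt ThIs SeNtEnCe?"],
]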
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> int: '''simple docstring''' A__ = 384 A__ = 7 if "tiny" in model_name: A__ = 96 A__ = (2, 2, 6, 2) A__ = (3, 6, 12, 24) elif "small" in model_name: A__ = 96 A__ = (2, 2, 18, 2) A__ = (3, 6, 12, 24) elif "base" in model_name: A__ = 128 A__ = (2, 2, 18, 2) A__ = (4, 8, 16, 32) A__ = 12 A__ = 512 elif "large" in model_name: A__ = 192 A__ = (2, 2, 18, 2) A__ = (6, 12, 24, 48) A__ = 12 A__ = 768 # set label information A__ = 150 A__ = 'huggingface/label-files' A__ = 'ade20k-id2label.json' A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) ) A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} A__ = {v: k for k, v in idalabel.items()} A__ = SwinConfig( embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , window_size=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) A__ = UperNetConfig( backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , ) return config def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' A__ = [] # fmt: off # stem rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', 
f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: '''simple docstring''' A__ = dct.pop(SCREAMING_SNAKE_CASE__ ) A__ = val def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: '''simple docstring''' A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' ) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[:dim, :] A__ = in_proj_bias[: dim] A__ = in_proj_weight[ dim : dim * 2, : ] A__ = in_proj_bias[ dim : dim * 2 ] A__ = in_proj_weight[ -dim :, : ] A__ = in_proj_bias[-dim :] # fmt: on def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , 4 , in_channel // 4 ) A__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , in_channel // 4 , 4 ) A__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(4 , in_channel // 4 ) A__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(in_channel // 4 , 4 ) A__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, 
Any]: '''simple docstring''' A__ = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } A__ = model_name_to_url[model_name] A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' , file_name=SCREAMING_SNAKE_CASE__ )[ 'state_dict' ] for name, param in state_dict.items(): print(SCREAMING_SNAKE_CASE__ , param.shape ) A__ = get_upernet_config(SCREAMING_SNAKE_CASE__ ) A__ = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "bn" in key: A__ = key.replace('bn' , 'batch_norm' ) A__ = val # rename keys A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: A__ = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE__ ) if "norm" in key: A__ = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # verify on image A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' ) A__ = SegformerImageProcessor() A__ = processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits print(logits.shape ) print('First values of logits:' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": A__ = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": A__ = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": A__ = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": A__ = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) print('Looks ok!' 
    )

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
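# Toy sanity check (made-up tensor, not checkpoint data) for the unfold
# reordering helpers above: a vector is viewed in blocks, the blocks are
# permuted with [0, 2, 1, 3] to match Swin's 2x2 patch-merge ordering, then
# flattened back.
import torch

x = torch.arange(8)
reordered = x.reshape(4, 2)[[0, 2, 1, 3], :].transpose(0, 1).reshape(8)
assert reordered.tolist() == [0, 4, 2, 6, 1, 5, 3, 7]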
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->list: '''simple docstring''' if n_term == "": return [] a : list = [] for temp in range(int(_lowercase ) ): series.append(F"""1/{temp + 1}""" if series else "1" ) return series if __name__ == "__main__": a : Tuple = input('''Enter the last number (nth term) of the Harmonic Series''') print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''') print(harmonic_series(nth_term))
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets a : Tuple = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' a : List[str] = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' a : List[Any] = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __a ( self ) -> Dict: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , ) -> Any: a : Optional[int] = len(references[0] ) if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) a : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )] a : Union[str, Any] = TER( normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , ) a : Optional[Any] = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase : int =logging.get_logger(__name__) def lowerCAmelCase_ ( _lowercase : List[Any] , _lowercase : Optional[int]=False , _lowercase : List[str]=False , _lowercase : Any=False) -> Optional[Any]: """simple docstring""" a__ : Tuple = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''')) rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''')) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''')) rename_keys.append( (F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''')) rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''')) rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''')) rename_keys.append( (F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''')) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''')) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''')) rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''')) # embeddings rename_keys.extend( [ # text embeddings ("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""), ( """text_embeddings.position_embeddings.weight""", """vilt.embeddings.text_embeddings.position_embeddings.weight""", ), ("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""), ( """text_embeddings.token_type_embeddings.weight""", """vilt.embeddings.text_embeddings.token_type_embeddings.weight""", ), ("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""), ("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""), # patch embeddings ("""transformer.cls_token""", """vilt.embeddings.cls_token"""), ("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""), ("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""), ("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""), # token type embeddings ("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""), ]) # final layernorm + pooler rename_keys.extend( [ ("""transformer.norm.weight""", """vilt.layernorm.weight"""), ("""transformer.norm.bias""", """vilt.layernorm.bias"""), ("""pooler.dense.weight""", """vilt.pooler.dense.weight"""), ("""pooler.dense.bias""", """vilt.pooler.dense.bias"""), ]) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ 
("""vqa_classifier.0.weight""", """classifier.0.weight"""), ("""vqa_classifier.0.bias""", """classifier.0.bias"""), ("""vqa_classifier.1.weight""", """classifier.1.weight"""), ("""vqa_classifier.1.bias""", """classifier.1.bias"""), ("""vqa_classifier.3.weight""", """classifier.3.weight"""), ("""vqa_classifier.3.bias""", """classifier.3.bias"""), ]) elif nlvr_model: # classification head rename_keys.extend( [ ("""nlvr2_classifier.0.weight""", """classifier.0.weight"""), ("""nlvr2_classifier.0.bias""", """classifier.0.bias"""), ("""nlvr2_classifier.1.weight""", """classifier.1.weight"""), ("""nlvr2_classifier.1.bias""", """classifier.1.bias"""), ("""nlvr2_classifier.3.weight""", """classifier.3.weight"""), ("""nlvr2_classifier.3.bias""", """classifier.3.bias"""), ]) else: pass return rename_keys def lowerCAmelCase_ ( _lowercase : Union[str, Any] , _lowercase : Optional[int]) -> List[str]: """simple docstring""" for i in range(config.num_hidden_layers): a__ : Optional[int] = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a__ : Dict = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''') a__ : Tuple = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''') # next, add query, keys and values (in that order) to the state dict a__ : Optional[Any] = in_proj_weight[ : config.hidden_size, : ] a__ : List[Any] = in_proj_bias[: config.hidden_size] a__ : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a__ : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a__ : Tuple = in_proj_weight[ -config.hidden_size :, : ] a__ : Optional[Any] = in_proj_bias[-config.hidden_size :] def lowerCAmelCase_ ( _lowercase : str) -> Optional[Any]: """simple docstring""" a__ : Tuple = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(__a , __a) def lowerCAmelCase_ ( _lowercase : int , _lowercase : Dict , _lowercase : Optional[int]) -> int: """simple docstring""" a__ : Optional[int] = dct.pop(__a) a__ : Optional[int] = val @torch.no_grad() def lowerCAmelCase_ ( _lowercase : int , _lowercase : str) -> str: """simple docstring""" a__ : Dict = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=__a) a__ : List[Any] = False a__ : List[Any] = False a__ : List[str] = False a__ : Optional[int] = False if "vqa" in checkpoint_url: a__ : Optional[Any] = True a__ : List[str] = 3129 a__ : Any = 'huggingface/label-files' a__ : Optional[Any] = 'vqa2-id2label.json' a__ : List[Any] = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""") , """r""")) a__ : List[str] = {int(__a): v for k, v in idalabel.items()} a__ : Union[str, Any] = idalabel a__ : List[Any] = {v: k for k, v in idalabel.items()} a__ : List[Any] = ViltForQuestionAnswering(__a) elif "nlvr" in checkpoint_url: a__ : str = True a__ : List[Any] = 2 a__ : List[str] = {0: 'False', 1: 'True'} a__ : Optional[Any] = {v: k for k, v in config.idalabel.items()} a__ : Tuple = 3 a__ : str = ViltForImagesAndTextClassification(__a) elif "irtr" in checkpoint_url: a__ : Optional[Any] = True a__ : Union[str, Any] = ViltForImageAndTextRetrieval(__a) elif "mlm_itm" in checkpoint_url: a__ : str = True a__ : int = ViltForMaskedLM(__a) else: raise ValueError("""Unknown model type""") # load state_dict of original model, remove and rename some keys a__ : Optional[int] = torch.hub.load_state_dict_from_url(__a , map_location="""cpu""")['state_dict'] a__ : List[str] = create_rename_keys(__a , __a , __a , __a) for src, dest in rename_keys: 
rename_key(__a , __a , __a) read_in_q_k_v(__a , __a) if mlm_model or irtr_model: a__ : List[str] = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(__a , __a) # load state dict into HuggingFace model model.eval() if mlm_model: a__ : List[Any] = model.load_state_dict(__a , strict=__a) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(__a) # Define processor a__ : Union[str, Any] = ViltImageProcessor(size=384) a__ : Any = BertTokenizer.from_pretrained("""bert-base-uncased""") a__ : Optional[int] = ViltProcessor(__a , __a) # Forward pass on example inputs (image + text) if nlvr_model: a__ : Optional[Any] = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=__a).raw) a__ : Union[str, Any] = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=__a).raw) a__ : Any = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) a__ : Dict = processor(__a , __a , return_tensors="""pt""") a__ : List[str] = processor(__a , __a , return_tensors="""pt""") a__ : Dict = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: a__ : Dict = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=__a).raw) if mlm_model: a__ : Union[str, Any] = 'a bunch of [MASK] laying on a [MASK].' else: a__ : Optional[int] = 'How many cats are there?' a__ : List[str] = processor(__a , __a , return_tensors="""pt""") a__ : int = model(**__a) # Verify outputs if mlm_model: a__ : List[str] = torch.Size([1, 11, 3_0522]) a__ : Any = torch.tensor([-12.5061, -12.5123, -12.5174]) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , __a , atol=1e-4) # verify masked token prediction equals "cats" a__ : str = outputs.logits[0, 4, :].argmax(-1).item() assert tokenizer.decode([predicted_id]) == "cats" elif vqa_model: a__ : Tuple = torch.Size([1, 3129]) a__ : Optional[int] = torch.tensor([-15.9495, -18.1472, -10.3041]) assert torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , __a , atol=1e-4) # verify vqa prediction equals "2" a__ : Optional[Any] = outputs.logits.argmax(-1).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: a__ : str = torch.Size([1, 2]) a__ : Dict = torch.tensor([-2.8721, 2.1291]) assert torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4) assert outputs.logits.shape == expected_shape Path(__a).mkdir(exist_ok=__a) print(F'''Saving model and processor to {pytorch_dump_folder_path}''') model.save_pretrained(__a) processor.save_pretrained(__a) if __name__ == "__main__": _lowercase : int =argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) _lowercase : Tuple =parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
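# Toy illustration (hypothetical hidden size) of the fused-QKV split done in
# read_in_q_k_v above: the original checkpoint stacks query, key and value
# along dim 0, and the conversion slices them apart in that order.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : hidden_size * 2, :]
v = in_proj_weight[-hidden_size:, :]
assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)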
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve P = V * I for whichever of the three quantities is passed as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
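# Example: with voltage passed as 0 (the unknown), V = P / I = 5 / 2 = 2.5.
assert electric_power(voltage=0, current=2, power=5) == ("voltage", 2.5)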
'''simple docstring''' import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """""" _SCREAMING_SNAKE_CASE = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _SCREAMING_SNAKE_CASE = None # compression type in fsspec. ex: "gzip" _SCREAMING_SNAKE_CASE = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : int , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[str] ): """simple docstring""" super().__init__(self , **UpperCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode UpperCamelCase = fsspec.open( UpperCamelCase__ , mode='rb' , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) UpperCamelCase = os.path.basename(self.file.path.split('::' )[0] ) UpperCamelCase = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) UpperCamelCase = None @classmethod def A ( cls : int , UpperCamelCase__ : List[str] ): """simple docstring""" return super()._strip_protocol(UpperCamelCase__ ).lstrip('/' ) def A ( self : Optional[int] ): """simple docstring""" if self.dir_cache is None: UpperCamelCase = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} UpperCamelCase = {f['name']: f} def A ( self : Any , UpperCamelCase__ : str ): """simple docstring""" return self.file.open().read() def A ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Dict , ): """simple docstring""" UpperCamelCase = self._strip_protocol(UpperCamelCase__ ) if mode != "rb": raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" ) return self.file.open() class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """bz2""" _SCREAMING_SNAKE_CASE = """bz2""" _SCREAMING_SNAKE_CASE = """.bz2""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """gzip""" _SCREAMING_SNAKE_CASE = """gzip""" _SCREAMING_SNAKE_CASE = """.gz""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """lz4""" _SCREAMING_SNAKE_CASE = """lz4""" _SCREAMING_SNAKE_CASE = """.lz4""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """xz""" _SCREAMING_SNAKE_CASE = """xz""" _SCREAMING_SNAKE_CASE = """.xz""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = """zstd""" _SCREAMING_SNAKE_CASE = """zstd""" _SCREAMING_SNAKE_CASE = """.zst""" def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : Dict , ): """simple docstring""" super().__init__( 
fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 UpperCamelCase = self.file.__enter__ class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase__ : Optional[int] ): """simple docstring""" UpperCamelCase = file_ def __enter__( self : Tuple ): """simple docstring""" self._file.__enter__() return self def __exit__( self : int , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Tuple ): """simple docstring""" self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ ) def __iter__( self : Dict ): """simple docstring""" return iter(self._file ) def A ( self : str ): """simple docstring""" return next(self._file ) def __getattr__( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" return getattr(self._file , UpperCamelCase__ ) def fixed_enter(*UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any ): return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) ) UpperCamelCase = fixed_enter
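# Minimal usage sketch for the compression filesystems above. This assumes the
# classes are registered with fsspec (e.g. via fsspec.register_implementation;
# the `datasets` library does this at import time):
#
#     import fsspec
#     with fsspec.open("gzip://data.txt::./data.txt.gz", mode="rt") as f:
#         text = f.read()
#
# The chained URL fetches ./data.txt.gz and exposes the decompressed stream
# under the single inner filename; only read mode is supported, as _open above
# raises ValueError for any mode other than "rb".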
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
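# A minimal usage sketch for the pipeline exercised by the tests above (assumption: the tiny
# checkpoint name mirrors the one used in the tests and is downloaded from the Hub on first use):
#
#   from transformers import pipeline
#
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   print(generator("translate English to German: Hello", do_sample=False))
#   # -> [{"generated_text": "..."}]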
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
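# Sketch of how `batch_decode` above is typically fed (assumptions: `processor` is an
# instantiated MgpstrProcessor, the logits come from the three MGP-STR heads, and the vocab
# sizes shown are illustrative only, not taken from this file):
#
#   import torch
#
#   batch, seq_len = 2, 27
#   char_logits = torch.randn(batch, seq_len, 38)     # character head
#   bpe_logits = torch.randn(batch, seq_len, 50257)   # GPT-2 BPE head
#   wp_logits = torch.randn(batch, seq_len, 30522)    # BERT wordpiece head
#   out = processor.batch_decode((char_logits, bpe_logits, wp_logits))
#   # out["generated_text"] holds, per sample, the decoded string with the best confidence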
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL.

    The URL is of the form `https://api.github.com/repos/huggingface/transformers/actions/artifacts/{ARTIFACT_ID}/zip`,
    but it can't be used to download directly. We need to get a redirect URL first.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model


def reduce_by_model(logs, error_filter=None):
    """count each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
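# Example invocation of the script above (a sketch: the file name is hypothetical, and it
# assumes a valid workflow run id plus a GitHub token with `actions:read` permission):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_reports \
#       --token $GITHUB_TOKEN
#
# This writes job_links.json, artifacts.json, errors.json and the two markdown tables into
# `ci_reports/`.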
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .continous_encoder import SpectrogramContEncoder
    from .notes_encoder import SpectrogramNotesEncoder
    from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline, T5FilmDecoder

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
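# The guarded-import idiom above is how diffusers degrades gracefully when an optional
# backend is missing. A self-contained sketch of the same pattern (generic names, not from
# this package):
#
#   try:
#       import note_seq  # noqa: F401
#       _HAS_NOTE_SEQ = True
#   except ImportError:
#       _HAS_NOTE_SEQ = False
#
#   if _HAS_NOTE_SEQ:
#       pass  # expose the MIDI helpers here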
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
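# Minimal usage sketch for the image processor under test (assumptions: `pytesseract` and the
# Tesseract binary are installed, and `document.png` is a hypothetical input file):
#
#   from PIL import Image
#   from transformers import LayoutLMv3ImageProcessor
#
#   processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   print(encoding.pixel_values.shape)  # resized pixel values
#   print(encoding.words, encoding.boxes)  # OCR'd words and their normalized bounding boxes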
"""simple docstring""" import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): # A mock response for an HTTP head request to emulate server down lowerCAmelCase_ : Union[str, Any] = mock.Mock() lowerCAmelCase_ : Dict = 5_0_0 lowerCAmelCase_ : List[str] = {} lowerCAmelCase_ : Dict = HTTPError lowerCAmelCase_ : Any = {} # Download this model to make sure it's in the cache. lowerCAmelCase_ : Union[str, Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=__UpperCamelCase ) as mock_head: lowerCAmelCase_ : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): # A mock response for an HTTP head request to emulate server down lowerCAmelCase_ : List[str] = mock.Mock() lowerCAmelCase_ : Dict = 5_0_0 lowerCAmelCase_ : List[str] = {} lowerCAmelCase_ : List[Any] = HTTPError lowerCAmelCase_ : Optional[Any] = {} # Download this model to make sure it's in the cache. lowerCAmelCase_ : int = GPTaTokenizerFast.from_pretrained('gpt2' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('requests.Session.request' , return_value=__UpperCamelCase ) as mock_head: lowerCAmelCase_ : List[str] = GPTaTokenizerFast.from_pretrained('gpt2' ) # This check we did call the fake head request mock_head.assert_called() def SCREAMING_SNAKE_CASE__ ( self : Tuple ): # This test is for deprecated behavior and can be removed in v5 try: lowerCAmelCase_ : str = tempfile.mktemp() with open(__UpperCamelCase , 'wb' ) as f: http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , __UpperCamelCase ) lowerCAmelCase_ : Tuple = AlbertTokenizer.from_pretrained(__UpperCamelCase ) finally: os.remove(__UpperCamelCase ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('tokenizer.json' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. 
return try: with open('tokenizer.json' , 'wb' ) as f: http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , __UpperCamelCase ) lowerCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_0_0_0 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove('tokenizer.json' ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): # This test is for deprecated behavior and can be removed in v5 lowerCAmelCase_ : Optional[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ) @is_staging_test class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Tuple ): lowerCAmelCase_ : int = TOKEN HfFolder.save_token(__UpperCamelCase ) @classmethod def SCREAMING_SNAKE_CASE__ ( cls : Any ): try: delete_repo(token=cls._token , repo_id='test-tokenizer' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' ) except HTTPError: pass def SCREAMING_SNAKE_CASE__ ( self : int ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ : Any = os.path.join(__UpperCamelCase , 'vocab.txt' ) with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) lowerCAmelCase_ : Union[str, Any] = BertTokenizer(__UpperCamelCase ) tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token ) lowerCAmelCase_ : Dict = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='test-tokenizer' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__UpperCamelCase , repo_id='test-tokenizer' , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) lowerCAmelCase_ : Dict = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ : List[Any] = os.path.join(__UpperCamelCase , 'vocab.txt' ) with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) lowerCAmelCase_ : Dict = BertTokenizer(__UpperCamelCase ) tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token ) lowerCAmelCase_ : Any = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( __UpperCamelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) lowerCAmelCase_ : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def 
SCREAMING_SNAKE_CASE__ ( self : str ): CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ : Union[str, Any] = os.path.join(__UpperCamelCase , 'vocab.txt' ) with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) lowerCAmelCase_ : Union[str, Any] = CustomTokenizer(__UpperCamelCase ) # No fast custom tokenizer tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ : List[str] = os.path.join(__UpperCamelCase , 'vocab.txt' ) with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) ) lowerCAmelCase_ : Optional[int] = BertTokenizerFast.from_pretrained(__UpperCamelCase ) bert_tokenizer.save_pretrained(__UpperCamelCase ) lowerCAmelCase_ : int = CustomTokenizerFast.from_pretrained(__UpperCamelCase ) tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token ) lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' ) lowerCAmelCase_ : Tuple = AutoTokenizer.from_pretrained( F"{USER}/test-dynamic-tokenizer" , use_fast=__UpperCamelCase , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self : str ): lowerCAmelCase_ : Optional[int] = Trie() trie.add('Hello 友達' ) self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} ) trie.add('Hello' ) trie.data self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowerCAmelCase_ : str = Trie() self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] ) trie.add('[CLS]' ) trie.add('extra_id_1' ) trie.add('extra_id_100' ) self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] ) def SCREAMING_SNAKE_CASE__ ( self : str ): lowerCAmelCase_ : Dict = Trie() trie.add('A' ) self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] ) self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] ) def SCREAMING_SNAKE_CASE__ ( self : Any ): lowerCAmelCase_ : Optional[Any] = Trie() trie.add('TOKEN]' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : Optional[Any] = Trie() trie.add('A' ) trie.add('P' ) trie.add('[SPECIAL_TOKEN]' ) self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', 
'[SPECIAL_TOKEN]'] ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowerCAmelCase_ : List[str] = Trie() trie.add('AB' ) trie.add('B' ) trie.add('C' ) self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] ) def SCREAMING_SNAKE_CASE__ ( self : str ): lowerCAmelCase_ : int = Trie() trie.add('ABC' ) trie.add('B' ) trie.add('CD' ) self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): # Even if the offsets are wrong, we necessarily output correct string # parts. lowerCAmelCase_ : Optional[int] = Trie() lowerCAmelCase_ : Any = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(__UpperCamelCase , ['AB', 'C'] )
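# Standalone sketch of the Trie behaviour the tests above exercise: added special tokens
# split the surrounding text without being merged into it (runnable as-is with transformers
# installed):
#
#   from transformers.tokenization_utils import Trie
#
#   trie = Trie()
#   trie.add("[CLS]")
#   trie.add("extra_id_100")
#   print(trie.split("[CLS] This is a extra_id_100"))
#   # -> ["[CLS]", " This is a ", "extra_id_100"]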
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCAmelCase ( lowercase_ , unittest.TestCase ): UpperCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ): A = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__UpperCamelCase ) ) A = np.random.RandomState(__UpperCamelCase ) A = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCamelCase ( self :Any ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Dict ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Optional[Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , 
provider="CPUExecutionProvider" ) A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self :Union[str, Any] ): A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = self.get_dummy_inputs() A = pipe(**__UpperCamelCase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) A = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): @property def lowerCamelCase ( self :Optional[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase ( self :Optional[int] ): A = ort.SessionOptions() A = False return options def lowerCamelCase ( self :Dict ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCamelCase ( self :Any ): A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) A = init_image.resize((7_68, 5_12) ) A = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = "A fantasy landscape, trending on artstation" A = np.random.RandomState(0 ) A = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , ) A = output.images A = images[0, 2_55:2_58, 3_83:3_86, 
-1] assert images.shape == (1, 5_12, 7_68, 3) A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
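# Condensed usage sketch for the ONNX img2img pipeline covered by the tests above (hedged:
# the checkpoint download and ONNX Runtime setup are heavy, and `init_image` must be a PIL
# image you supply, so this is illustrative only):
#
#   from diffusers import OnnxStableDiffusionImg2ImgPipeline
#
#   pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
#   )
#   result = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75)
#   result.images[0]  # numpy array or PIL image depending on output_type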
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _a : str = logging.get_logger(__name__) _a : Tuple = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[Any] = "pix2struct_text_model" _UpperCamelCase : Optional[int] = ["past_key_values"] _UpperCamelCase : Union[str, Any] = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , a__=50244 , a__=768 , a__=64 , a__=2048 , a__=12 , a__=12 , a__=32 , a__=128 , a__=0.1 , a__=1e-6 , a__=1.0 , a__="gelu_new" , a__=0 , a__=False , a__=0 , a__=1 , a__=False , a__=True , **a__ , ): _lowerCAmelCase : Dict = vocab_size _lowerCAmelCase : str = hidden_size _lowerCAmelCase : Optional[Any] = d_kv _lowerCAmelCase : Tuple = d_ff _lowerCAmelCase : Any = num_layers _lowerCAmelCase : Tuple = num_heads _lowerCAmelCase : Union[str, Any] = relative_attention_num_buckets _lowerCAmelCase : List[Any] = relative_attention_max_distance _lowerCAmelCase : Union[str, Any] = dropout_rate _lowerCAmelCase : Union[str, Any] = layer_norm_epsilon _lowerCAmelCase : Optional[int] = initializer_factor _lowerCAmelCase : Optional[Any] = use_cache _lowerCAmelCase : Tuple = eos_token_id _lowerCAmelCase : Tuple = decoder_start_token_id # for backwards compatibility _lowerCAmelCase : Dict = dense_act_fn super().__init__( pad_token_id=a__ , eos_token_id=a__ , decoder_start_token_id=a__ , tie_word_embeddings=a__ , is_decoder=a__ , **a__ , ) @classmethod def __A ( cls , a__ , **a__ ): cls._set_token_in_kwargs(a__ ) _lowerCAmelCase , _lowerCAmelCase : str = cls.get_config_dict(a__ , **a__ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": _lowerCAmelCase : List[str] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a__ , **a__ ) class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : List[str] = "pix2struct_vision_model" def __init__( self , a__=768 , a__=768 , a__=2048 , a__=64 , a__=12 , a__=12 , a__="gelu_new" , a__=1e-6 , a__=0.0 , a__=0.0 , a__=1e-10 , a__=1.0 , a__=4096 , a__=32 , a__=128 , **a__ , ): super().__init__(**a__ ) _lowerCAmelCase : int = hidden_size _lowerCAmelCase : Any = patch_embed_hidden_size _lowerCAmelCase : int = d_ff _lowerCAmelCase : str = dropout_rate _lowerCAmelCase : int = num_hidden_layers _lowerCAmelCase : Any = num_attention_heads _lowerCAmelCase : Optional[int] = initializer_range _lowerCAmelCase : List[str] = initializer_factor _lowerCAmelCase : Dict = attention_dropout _lowerCAmelCase : str = layer_norm_eps _lowerCAmelCase : Optional[int] = dense_act_fn _lowerCAmelCase : str = seq_len _lowerCAmelCase : Tuple = relative_attention_num_buckets _lowerCAmelCase : List[str] = relative_attention_max_distance _lowerCAmelCase : int = d_kv @classmethod def __A ( cls , a__ , **a__ ): cls._set_token_in_kwargs(a__ ) _lowerCAmelCase , _lowerCAmelCase : Dict = cls.get_config_dict(a__ , **a__ ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": _lowerCAmelCase : List[str] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a__ , **a__ ) class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[Any] = "pix2struct" _UpperCamelCase : List[str] = True def __init__( self , a__=None , a__=None , a__=1.0 , a__=0.0_2 , a__=False , a__=False , a__=True , **a__ , ): super().__init__(tie_word_embeddings=a__ , is_encoder_decoder=a__ , **a__ ) if text_config is None: _lowerCAmelCase : str = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: _lowerCAmelCase : Union[str, Any] = {} logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" ) _lowerCAmelCase : List[Any] = PixaStructTextConfig(**a__ ) _lowerCAmelCase : Dict = PixaStructVisionConfig(**a__ ) _lowerCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id _lowerCAmelCase : Dict = self.text_config.pad_token_id _lowerCAmelCase : List[str] = self.text_config.eos_token_id _lowerCAmelCase : Union[str, Any] = initializer_factor _lowerCAmelCase : int = initializer_range _lowerCAmelCase : Optional[Any] = self.initializer_range _lowerCAmelCase : Tuple = self.initializer_range _lowerCAmelCase : Dict = is_vqa @classmethod def __A ( cls , a__ , a__ , **a__ ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ ) def __A ( self ): _lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowerCAmelCase : Union[str, Any] = self.text_config.to_dict() _lowerCAmelCase : Union[str, Any] = self.vision_config.to_dict() _lowerCAmelCase : Tuple = self.__class__.model_type return output
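# Sketch of composing the three configs above from their defaults (nothing model-specific
# assumed beyond the classes defined in this file):
#
#   text_config = Pix2StructTextConfig()
#   vision_config = Pix2StructVisionConfig()
#   config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#   print(config.to_dict()["model_type"])  # "pix2struct"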
"""simple docstring""" from ..utils import DummyObject, requires_backends class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[Any] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : List[Any] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : int = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : str = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[int] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : List[Any] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : str = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Union[str, Any] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[int] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : List[str] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Any = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : str = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Tuple = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : str = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Tuple = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : str = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Tuple = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : str = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Dict = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Dict = 
["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Union[str, Any] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Tuple = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : List[Any] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[int] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[Any] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[Any] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : List[Any] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : int = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Dict = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : List[str] = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] ) class __A ( metaclass=SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : int = ["sentencepiece"] def __init__( self , *a__ , **a__ ): requires_backends(self , ["""sentencepiece"""] )
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
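# --- Added usage sketch (not part of the builder above) ---
# The builder is what backs `load_dataset("parquet", ...)`; the file path
# below is hypothetical.
from datasets import load_dataset

dataset = load_dataset("parquet", data_files={"train": "data/train.parquet"})
print(dataset["train"].features)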
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __lowerCamelCase ( __snake_case : List[str], __snake_case : Union[str, Any], __snake_case : Dict ) -> Dict: """simple docstring""" return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :] def __lowerCamelCase ( __snake_case : str, __snake_case : int, __snake_case : Dict, __snake_case : int="attention" ) -> str: """simple docstring""" A__ : Union[str, Any] =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] ) A__ : str =k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2] ) A__ : List[Any] =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] ) A__ : Optional[int] =o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2] ) A__ : Dict =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] ) A__ : Dict =q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2] ) A__ : Union[str, Any] =np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] ) A__ : List[str] =v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def __lowerCamelCase ( __snake_case : Dict, __snake_case : Any, __snake_case : Tuple, __snake_case : Optional[Any]=False ) -> Any: """simple docstring""" if split_mlp_wi: A__ : Any =params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :] A__ : int =params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :] A__ : Optional[Any] =(wi_a, wi_a) else: A__ : Optional[int] =params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :] A__ : int =params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :] return wi, wo def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : str, __snake_case : Any, __snake_case : int ) -> List[Any]: """simple docstring""" return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i] def __lowerCamelCase ( __snake_case : dict, *, __snake_case : int, __snake_case : bool, __snake_case : bool = False ) -> Union[str, Any]: """simple docstring""" A__ : Optional[int] =traverse_util.flatten_dict(variables["""target"""] ) A__ : int ={"""/""".join(__snake_case ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi A__ : List[Any] ="""encoder/encoder/mlp/wi_0/kernel""" in old print("""Split MLP:""", __snake_case ) A__ : Optional[int] =collections.OrderedDict() # Shared embeddings. A__ : List[Any] =old["""token_embedder/embedding"""] # Encoder. for i in range(__snake_case ): # Block i, layer 0 (Self Attention). A__ : Optional[Any] =tax_layer_norm_lookup(__snake_case, __snake_case, """encoder""", """pre_attention_layer_norm""" ) A__ , A__ , A__ , A__ : Optional[int] =tax_attention_lookup(__snake_case, __snake_case, """encoder""", """attention""" ) A__ : List[str] =layer_norm A__ : Dict =k.T A__ : Optional[int] =o.T A__ : str =q.T A__ : Any =v.T # Block i, layer 1 (MLP). 
A__ : List[Any] =tax_layer_norm_lookup(__snake_case, __snake_case, """encoder""", """pre_mlp_layer_norm""" ) A__ , A__ : int =tax_mlp_lookup(__snake_case, __snake_case, """encoder""", __snake_case ) A__ : Optional[int] =layer_norm if split_mlp_wi: A__ : List[str] =wi[0].T A__ : List[str] =wi[1].T else: A__ : Optional[int] =wi.T A__ : Optional[Any] =wo.T if scalable_attention: # convert the rel_embedding of each layer A__ : int =tax_relpos_bias_lookup( __snake_case, __snake_case, """encoder""" ).T A__ : Optional[int] =old["""encoder/encoder_norm/scale"""] if not scalable_attention: A__ : List[Any] =tax_relpos_bias_lookup( __snake_case, 0, """encoder""" ).T A__ : Tuple =tax_relpos_bias_lookup( __snake_case, 0, """decoder""" ).T if not is_encoder_only: # Decoder. for i in range(__snake_case ): # Block i, layer 0 (Self Attention). A__ : List[str] =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_self_attention_layer_norm""" ) A__ , A__ , A__ , A__ : List[str] =tax_attention_lookup(__snake_case, __snake_case, """decoder""", """self_attention""" ) A__ : str =layer_norm A__ : List[str] =k.T A__ : int =o.T A__ : Tuple =q.T A__ : Optional[Any] =v.T # Block i, layer 1 (Cross Attention). A__ : int =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_cross_attention_layer_norm""" ) A__ , A__ , A__ , A__ : Optional[Any] =tax_attention_lookup(__snake_case, __snake_case, """decoder""", """encoder_decoder_attention""" ) A__ : str =layer_norm A__ : Union[str, Any] =k.T A__ : str =o.T A__ : Any =q.T A__ : str =v.T # Block i, layer 2 (MLP). A__ : str =tax_layer_norm_lookup(__snake_case, __snake_case, """decoder""", """pre_mlp_layer_norm""" ) A__ , A__ : Optional[int] =tax_mlp_lookup(__snake_case, __snake_case, """decoder""", __snake_case ) A__ : Dict =layer_norm if split_mlp_wi: A__ : List[Any] =wi[0].T A__ : Union[str, Any] =wi[1].T else: A__ : Optional[int] =wi.T A__ : str =wo.T if scalable_attention: # convert the rel_embedding of each layer A__ : str =tax_relpos_bias_lookup(__snake_case, __snake_case, """decoder""" ).T A__ : str =old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: A__ : Tuple =old["""decoder/logits_dense/kernel"""].T return new def __lowerCamelCase ( __snake_case : Dict, __snake_case : bool ) -> Optional[Any]: """simple docstring""" A__ : Any =collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: A__ : Union[str, Any] =state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: A__ : List[str] =state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print("""Using shared word embeddings as lm_head.""" ) A__ : Optional[Any] =state_dict["""shared.weight"""] return state_dict def __lowerCamelCase ( __snake_case : str, __snake_case : str, __snake_case : Optional[Any], __snake_case : int, __snake_case : Optional[int] ) -> Optional[int]: """simple docstring""" A__ : str =checkpoints.load_tax_checkpoint(__snake_case ) A__ : Optional[Any] =convert_tax_to_pytorch( __snake_case, num_layers=config.num_layers, is_encoder_only=__snake_case, scalable_attention=__snake_case ) A__ : str =make_state_dict(__snake_case, __snake_case ) model.load_state_dict(__snake_case, strict=__snake_case ) def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Dict, __snake_case : Optional[int], __snake_case : bool = False, __snake_case : bool = False, ) -> Dict: """simple docstring""" A__ : Tuple =MTaConfig.from_json_file(__snake_case ) print(f"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: A__ : List[Any] =UMTaEncoderModel(__snake_case ) else: A__ : int =UMTaForConditionalGeneration(__snake_case ) # Load weights from tf checkpoint load_tax_weights_in_ta(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(__snake_case ) # Verify that we can load the checkpoint. model.from_pretrained(__snake_case ) print("""Done""" ) if __name__ == "__main__": __snake_case : str = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action="https://huggingface.co/datasets/infinityofspace/python_codestyles-mixed1-500/viewer/default/store_true", help='Check if the model is encoder-decoder model', default=False ) parser.add_argument( '--scalable_attention', action="https://huggingface.co/datasets/infinityofspace/python_codestyles-mixed1-500/viewer/default/store_true", help='Whether the model uses scaled attention (umt5 model)', default=False, ) __snake_case : Optional[Any] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
# Doubly linked list. NOTE: the obfuscation collapsed every local variable to
# `_snake_case` and hid most method names; names below are reconstructed from
# the surviving call sites (insert_before_node, set_head, get_node, ...).
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:  # no-op placeholder kept from the original file
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
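# --- Added usage example (method names as reconstructed above) ---
linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert(value)
assert 2 in linked_list
linked_list.delete_value(2)
print(linked_list)  # -> 1 3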
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """Rotate/warp `img` with the affine transform that maps pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    # (digit-stripping obfuscation erased which point set each call used;
    # the pairings below are a plausible reconstruction)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
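# --- Added note (illustrative, not part of the original script) ---
# cv2.getAffineTransform expects exactly three source/destination point pairs,
# which is why every pts array above has shape (3, 2). Minimal standalone call
# with made-up points (a uniform scale by 2):
import cv2
import numpy as np

src = np.float32([[0, 0], [1, 0], [0, 1]])
dst = np.float32([[0, 0], [2, 0], [0, 2]])
print(cv2.getAffineTransform(src, dst))  # 2x3 affine matrix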
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class a_ ( unittest.TestCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Tuple: """simple docstring""" UpperCamelCase = size if size is not None else {"""shortest_edge""": 18} UpperCamelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize UpperCamelCase = size UpperCamelCase = do_center_crop UpperCamelCase = crop_size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def A__ ( self ) -> Any: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class a_ ( lowerCamelCase , unittest.TestCase ): lowercase = LevitImageProcessor if is_vision_available() else None def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = LevitImageProcessingTester(self ) @property def A__ ( self ) -> Union[str, Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self ) -> str: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_mean""" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """image_std""" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_normalize""" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_center_crop""" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size""" ) ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def A__ ( self ) -> Tuple: """simple docstring""" pass def A__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , 
Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def A__ ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
'''simple docstring''' import datasets from .evaluate import evaluate SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def A__ ( self ) -> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": { """id""": datasets.Value("""string""" ), """prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ), }, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , ) def A__ ( self 
, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} UpperCamelCase = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] UpperCamelCase = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE ) return score
"""simple docstring""" import os def lowerCamelCase () -> Any: lowercase :List[str] = os.path.join(os.path.dirname(a_) , '''num.txt''') with open(a_) as file_hand: return str(sum(int(a_) for line in file_hand))[:10] if __name__ == "__main__": print(solution())
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''caidas/swin2sr-classicalsr-x2-64''': ( '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json''' ), } class __magic_name__ ( __UpperCAmelCase ): __A : Tuple = "swin2sr" __A : Dict = { "hidden_size": "embed_dim", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : List[str] , snake_case__ : List[str]=6_4 , snake_case__ : Union[str, Any]=1 , snake_case__ : Tuple=3 , snake_case__ : int=1_8_0 , snake_case__ : Union[str, Any]=[6, 6, 6, 6, 6, 6] , snake_case__ : List[str]=[6, 6, 6, 6, 6, 6] , snake_case__ : Tuple=8 , snake_case__ : List[Any]=2.0 , snake_case__ : Any=True , snake_case__ : Dict=0.0 , snake_case__ : Dict=0.0 , snake_case__ : Dict=0.1 , snake_case__ : Dict="gelu" , snake_case__ : Optional[int]=False , snake_case__ : Any=0.02 , snake_case__ : Any=1e-5 , snake_case__ : Optional[int]=2 , snake_case__ : Optional[int]=1.0 , snake_case__ : Optional[Any]="1conv" , snake_case__ : List[str]="pixelshuffle" , **snake_case__ : Tuple , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Dict = image_size lowercase :List[str] = patch_size lowercase :Tuple = num_channels lowercase :int = embed_dim lowercase :Any = depths lowercase :Union[str, Any] = len(snake_case__ ) lowercase :List[str] = num_heads lowercase :int = window_size lowercase :Tuple = mlp_ratio lowercase :List[Any] = qkv_bias lowercase :Optional[int] = hidden_dropout_prob lowercase :Tuple = attention_probs_dropout_prob lowercase :Tuple = drop_path_rate lowercase :Optional[Any] = hidden_act lowercase :Union[str, Any] = use_absolute_embeddings lowercase :Dict = layer_norm_eps lowercase :Optional[Any] = initializer_range lowercase :Optional[Any] = upscale lowercase :Any = img_range lowercase :Optional[int] = resi_connection lowercase :Union[str, Any] = upsampler
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
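# --- Added usage sketch (uses the classes defined above) ---
# `encode_example` flattens per-language lists and sorts by language code:
feature = TranslationVariableLanguages(languages=["en", "fr"])
print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
# -> {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}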
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
import math


def sieve(n: int) -> list:
    """Segmented sieve: return all primes <= n using O(sqrt(n)) memory per segment."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # classic sieve of Eratosthenes up to sqrt(n)
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # sieve the remaining range in segments of length `end`
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each  # first multiple of `each` at or below low
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
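# --- Added sanity check (illustrative; uses `sieve` and `math` from above) ---
# The segmented sieve should agree with direct trial division on a small range:
def is_prime(number: int) -> bool:
    return number >= 2 and all(number % d for d in range(2, int(math.sqrt(number)) + 1))


assert sieve(100) == [n for n in range(2, 101) if is_prime(n)]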
from ..utils import is_flax_available, is_torch_available


# NOTE: digit-stripping obfuscation had collapsed unet_1d/unet_2d (and
# unet_2d_condition/unet_3d_condition) into identical duplicate imports; the
# distinct module names below are restored from the diffusers source.
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
import re
from pathlib import Path
from unittest import TestCase

import pytest


# NOTE: class and test-method names below are reconstructed; the obfuscation
# had replaced them all with `__a`.
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""")

    def test_no_print_statements(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class __snake_case ( unittest.TestCase ): def __a ( self ) -> Tuple: '''simple docstring''' if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=__UpperCamelCase , ) assert hasattr(self , 'env' ) def __a ( self , __UpperCamelCase ) -> Optional[int]: '''simple docstring''' snake_case__ : Tuple = { 'enabled': True, 'processes_per_host': 8, } snake_case__ : Any = { 'enabled': True, 'parameters': { 'microbatches': 4, 'placement_strategy': 'spread', 'pipeline': 'interleaved', 'optimize': 'speed', 'partitions': 4, 'ddp': True, }, } snake_case__ : Optional[int] = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options} snake_case__ : int = 'trainer' if self.script == 'run_glue.py' else 'smtrainer' # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={ **self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path, 'max_steps': 500, } , metric_definitions=self.env.metric_definitions , distribution=__UpperCamelCase , py_version='py36' , ) def __a ( self , __UpperCamelCase ) -> List[Any]: '''simple docstring''' TrainingJobAnalytics(__UpperCamelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def __a ( self , __UpperCamelCase ) -> List[Any]: '''simple docstring''' snake_case__ : str = self.create_estimator(__UpperCamelCase ) # run training estimator.fit() # result dataframe snake_case__ : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis snake_case__ : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] ) snake_case__ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping snake_case__ : Any = ( Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= 
self.results['eval_accuracy'] for t in eval_accuracy ) assert all(t <= self.results['eval_loss'] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __UpperCamelCase )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_ernie'] = [
        'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ErnieForCausalLM',
        'ErnieForMaskedLM',
        'ErnieForMultipleChoice',
        'ErnieForNextSentencePrediction',
        'ErnieForPreTraining',
        'ErnieForQuestionAnswering',
        'ErnieForSequenceClassification',
        'ErnieForTokenClassification',
        'ErnieModel',
        'ErniePreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
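# --- Added note (illustrative) ---
# _LazyModule defers the submodule imports declared in _import_structure until
# an attribute is first accessed, so e.g.
#     from transformers.models.ernie import ErnieModel
# only triggers the real (and potentially heavy) torch-backed import at that
# point, keeping the top-level `import transformers` fast.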
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_bloom'] = [
        'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BloomForCausalLM',
        'BloomModel',
        'BloomPreTrainedModel',
        'BloomForSequenceClassification',
        'BloomForTokenClassification',
        'BloomForQuestionAnswering',
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)