"""Visualize a polynomial regression fit (fragment: the data loading and model
fitting happen earlier in the original module)."""

import matplotlib.pyplot as plt


def viz_polynomial() -> None:
    """Plot the training data against the fitted polynomial regression curve."""
    # `X`, `y` (training data) and `poly_reg`, `pol_reg` (fitted feature
    # transformer and regressor) are module-level names defined earlier in the
    # original script; the names used here are reconstructions.
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with polynomial regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
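# --- Hedged sketch (not in the original source) ------------------------------
# The fragment above assumes `X`, `y`, `poly_reg` and `pol_reg` already exist.
# The fit_transform/predict API strongly suggests scikit-learn; a minimal,
# self-contained version of that missing setup (toy data, degree chosen
# arbitrarily) could look like this:
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(1, 11).reshape(-1, 1)  # position levels 1..10 (illustrative)
y = np.array([45, 50, 60, 80, 110, 150, 200, 300, 500, 1000], dtype=float)

poly_reg = PolynomialFeatures(degree=4)  # degree=4 is an assumption
pol_reg = LinearRegression()
pol_reg.fit(poly_reg.fit_transform(X), y)
# ------------------------------------------------------------------------------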
links:\\n{traceback.format_exc()}\"\"\"\t\t)\r\n\r\n\t\t\treturn {}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= argparse.ArgumentParser()\r\n\t\t\t\t# Required parameters\r\n\t\t\t\tparser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= parser.parse_args()\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= get_job_time(args.workflow_run_id)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= dict(sorted(job_time.items(), key=lambda item: item[1][\"duration\"], reverse=True))\r\n\r\n\t\t\t\tfor k, v in job_time.items():\r\n\t\t\t\t\t\t\t\tprint(F\"\"\"{k}: {v[\"duration\"]}\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1.6021e-19 # units = C\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\t) -> tuple[str, float]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif (conductivity, electron_conc, mobility).count(0\t\t) != 1:\r\n\t\t\t\t\t\traise ValueError(\"\"\"You cannot supply more or less than 2 values\"\"\"\t\t)\r\n\t\t\telif conductivity < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Conductivity cannot be negative\"\"\"\t\t)\r\n\t\t\telif electron_conc < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Electron concentration cannot be negative\"\"\"\t\t)\r\n\t\t\telif mobility < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"mobility cannot be negative\"\"\"\t\t)\r\n\t\t\telif conductivity == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"conductivity\",\r\n\t\t\t\t\t\t mobility * electron_conc * ELECTRON_CHARGE,\r\n\t\t\t\t\t\t)\r\n\t\t\telif electron_conc == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"electron_conc\",\r\n\t\t\t\t\t\t conductivity / (mobility * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"mobility\",\r\n\t\t\t\t\t\t conductivity / (electron_conc * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":603,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport warnings\r\n\r\nfrom ...utils import logging\r\nfrom .image_processing_donut import DonutImageProcessor\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, *__A\t, **__A\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\twarnings.warn(\r\n\t\t\t\t\t\t \"\"\"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. 
from copy import deepcopy


class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix sums."""

    def __init__(self, arr=None, size=None) -> None:
        """Initialize from an existing array, or as an all-zero tree of `size`."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        """Build the tree in O(n) from `arr`."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        # Next index in the update chain: add the lowest set bit.
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        # Previous index in the query chain: clear the lowest set bit.
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to element `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set element `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of elements in [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of elements in [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of element `index` in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index i with prefix(i) <= value (requires non-negative elements)."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
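# Hedged usage sketch (not from the original file) exercising the class above.
# Indices are 0-based and `query(left, right)` is half-open: [left, right).
if __name__ == "__main__":
    f = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert f.prefix(5) == 15   # 1 + 2 + 3 + 4 + 5
    assert f.query(1, 4) == 9  # 2 + 3 + 4
    f.add(2, 10)               # element 2 becomes 13
    assert f.get(2) == 13
    assert f.get_array() == [1, 2, 13, 4, 5]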
"""Scrape basic product data from Amazon India search results into a DataFrame."""

from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Collect title, link, price, rating, MRP and discount for one search term."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
        " (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹" + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # discount = (MRP - price) / MRP * 100, with thousands separators stripped
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
    data_frame.loc[data_frame["Product Rating"] == "", "Product Rating"] = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
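# Hedged note (not from the original file): BeautifulSoup is called without an
# explicit parser above, which emits a GuessedAtParserWarning on recent bs4
# releases; passing one is behaviorally equivalent and silences the warning:
#
#     soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")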
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # The first three entries were lost in the garbled source; Funnel's
        # special tokens <unk>/<cls>/<sep> are reconstructed here.
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs) -> FunnelTokenizer:
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> FunnelTokenizerFast:
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
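# Hedged note (not from the original file): with the toy vocab above, ids are
# positional, so "un"=7, "##want"=4, "##ed"=5, ","=10, "runn"=8, "##ing"=9 --
# exactly the sequence asserted in test_full_tokenizer. The leading 2 in
# token_type_ids is Funnel's dedicated <cls> segment id (cls_token_type_id=2),
# which is why the first segment is [2] followed by zeros.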
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()

        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
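# Hedged usage sketch (not from the original file): a ClapProcessor accepts
# text and/or audio in one call. Its first two model_input_names come from the
# tokenizer (input_ids, attention_mask) and the rest from the feature
# extractor, which is why test_model_input_names slices with [2:].
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     batch = processor(
#         text=["a dog barking"], audios=floats_list((1, 1000)), return_tensors="np"
#     )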
\"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 7.5,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint\t, provider=\"\"\"CPUExecutionProvider\"\"\"\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= pipe(**__A\t).images\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 128, 128, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint\t, provider=\"\"\"CPUExecutionProvider\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= PNDMScheduler.from_config(pipe.scheduler.config\t, skip_prk_steps=__A\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= pipe(**__A\t).images\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 128, 128, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint\t, provider=\"\"\"CPUExecutionProvider\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= LMSDiscreteScheduler.from_config(pipe.scheduler.config\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t).images\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 128, 128, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint\t, provider=\"\"\"CPUExecutionProvider\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 
EulerDiscreteScheduler.from_config(pipe.scheduler.config\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= pipe(**__A\t).images\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 128, 128, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint\t, provider=\"\"\"CPUExecutionProvider\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= pipe(**__A\t).images\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 128, 128, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint\t, provider=\"\"\"CPUExecutionProvider\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= DPMSolverMultistepScheduler.from_config(pipe.scheduler.config\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t).images\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 128, 128, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint\t, provider=\"\"\"CPUExecutionProvider\"\"\"\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 3 * [inputs[\"\"\"prompt\"\"\"]]\r\n\r\n\t\t\t\t\t\t# forward\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= pipe(**__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= output.images[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 3 * [inputs.pop(\"\"\"prompt\"\"\"\t)]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ 
:Optional[int] \t\t\t\t\t= pipe.tokenizer(\r\n\t\t\t\t\t\t __A\t, padding=\"\"\"max_length\"\"\"\t, max_length=pipe.tokenizer.model_max_length\t, truncation=__A\t, return_tensors=\"\"\"np\"\"\"\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= text_inputs[\"\"\"input_ids\"\"\"]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe.text_encoder(input_ids=text_inputs.astype(np.intaa\t)\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= prompt_embeds\r\n\r\n\t\t\t\t\t\t# forward\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= output.images[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice_a.flatten() - image_slice_a.flatten()\t).max() < 1E-4\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint\t, provider=\"\"\"CPUExecutionProvider\"\"\"\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 3 * [\"\"\"this is a negative prompt\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= negative_prompt\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 3 * [inputs[\"\"\"prompt\"\"\"]]\r\n\r\n\t\t\t\t\t\t# forward\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= output.images[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_dummy_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 3 * [inputs.pop(\"\"\"prompt\"\"\"\t)]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor p in [prompt, negative_prompt]:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe.tokenizer(\r\n\t\t\t\t\t\t\t\t\t __A\t, padding=\"\"\"max_length\"\"\"\t, max_length=pipe.tokenizer.model_max_length\t, truncation=__A\t, return_tensors=\"\"\"np\"\"\"\t, )\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= text_inputs[\"\"\"input_ids\"\"\"]\r\n\r\n\t\t\t\t\t\t\t\t\tembeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa\t)\t)[0]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[str] \t\t\t\t\t= embeds\r\n\r\n\t\t\t\t\t\t# forward\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= output.images[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice_a.flatten() - image_slice_a.flatten()\t).max() < 1E-4\r\n\r\n\r\n\r\n\r\n@nightly\r\n@require_onnxruntime\r\n@require_torch_gpu\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"CUDAExecutionProvider\",\r\n\t\t\t\t\t\t {\r\n\t\t\t\t\t\t \"gpu_mem_limit\": \"15000000000\", # 15GB\r\n\t\t\t\t\t\t \"arena_extend_strategy\": \"kSameAsRequested\",\r\n\t\t\t\t\t\t },\r\n\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ort.SessionOptions()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= False\r\n\t\t\t\t\t\treturn options\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef 
\t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\t# using the PNDM scheduler by default\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"CompVis/stable-diffusion-v1-4\"\"\"\t, revision=\"\"\"onnx\"\"\"\t, safety_checker=__A\t, feature_extractor=__A\t, provider=self.gpu_provider\t, sess_options=self.gpu_options\t, )\r\n\t\t\t\t\t\tsd_pipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"A painting of a squirrel eating a burger\"\"\"\r\n\t\t\t\t\t\tnp.random.seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= sd_pipe([prompt]\t, guidance_scale=6.0\t, num_inference_steps=10\t, output_type=\"\"\"np\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= output.images\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 512, 512, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= DDIMScheduler.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, subfolder=\"\"\"scheduler\"\"\"\t, revision=\"\"\"onnx\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, revision=\"\"\"onnx\"\"\"\t, scheduler=__A\t, safety_checker=__A\t, feature_extractor=__A\t, provider=self.gpu_provider\t, sess_options=self.gpu_options\t, )\r\n\t\t\t\t\t\tsd_pipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"open neural network exchange\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= np.random.RandomState(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= sd_pipe([prompt]\t, guidance_scale=7.5\t, num_inference_steps=10\t, generator=__A\t, output_type=\"\"\"np\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= output.images\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 512, 512, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= LMSDiscreteScheduler.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, subfolder=\"\"\"scheduler\"\"\"\t, revision=\"\"\"onnx\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, revision=\"\"\"onnx\"\"\"\t, scheduler=__A\t, safety_checker=__A\t, feature_extractor=__A\t, provider=self.gpu_provider\t, sess_options=self.gpu_options\t, )\r\n\t\t\t\t\t\tsd_pipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"open neural network exchange\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 
np.random.RandomState(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= sd_pipe([prompt]\t, guidance_scale=7.5\t, num_inference_steps=10\t, generator=__A\t, output_type=\"\"\"np\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= output.images\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 512, 512, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 0\r\n\r\n\t\t\t\t\t\tdef test_callback_fn(__A\t, __A\t, __A\t) -> None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= True\r\n\t\t\t\t\t\t\t\t\tnonlocal number_of_steps\r\n\t\t\t\t\t\t\t\t\tnumber_of_steps += 1\r\n\t\t\t\t\t\t\t\t\tif step == 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\tassert latents.shape == (1, 4, 64, 64)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= latents[0, -3:, -3:, -1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= np.array(\r\n\t\t\t\t\t\t\t\t\t\t\t\t [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7]\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tassert np.abs(latents_slice.flatten() - expected_slice\t).max() < 1E-3\r\n\t\t\t\t\t\t\t\t\telif step == 5:\r\n\t\t\t\t\t\t\t\t\t\t\t\tassert latents.shape == (1, 4, 64, 64)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= latents[0, -3:, -3:, -1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= np.array(\r\n\t\t\t\t\t\t\t\t\t\t\t\t [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5]\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tassert np.abs(latents_slice.flatten() - expected_slice\t).max() < 1E-3\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= False\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, revision=\"\"\"onnx\"\"\"\t, safety_checker=__A\t, feature_extractor=__A\t, provider=self.gpu_provider\t, sess_options=self.gpu_options\t, )\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"Andromeda galaxy in a bottle\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= np.random.RandomState(0\t)\r\n\t\t\t\t\t\tpipe(\r\n\t\t\t\t\t\t prompt=__A\t, num_inference_steps=5\t, guidance_scale=7.5\t, generator=__A\t, callback=__A\t, callback_steps=1\t, )\r\n\t\t\t\t\t\tassert test_callback_fn.has_been_called\r\n\t\t\t\t\t\tassert number_of_steps == 6\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, revision=\"\"\"onnx\"\"\"\t, safety_checker=__A\t, feature_extractor=__A\t, provider=self.gpu_provider\t, sess_options=self.gpu_options\t, )\r\n\t\t\t\t\t\tassert isinstance(__A\t, __A\t)\r\n\t\t\t\t\t\tassert pipe.safety_checker is None\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(\"\"\"example prompt\"\"\"\t, num_inference_steps=2\t).images[0]\r\n\t\t\t\t\t\tassert image is not 
None\r\n\r\n\t\t\t\t\t\t# check that there's no error when saving a pipeline with one of the models being None\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdirname:\r\n\t\t\t\t\t\t\t\t\tpipe.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= OnnxStableDiffusionPipeline.from_pretrained(__A\t)\r\n\r\n\t\t\t\t\t\t# sanity check that the pipeline still works\r\n\t\t\t\t\t\tassert pipe.safety_checker is None\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= pipe(\"\"\"example prompt\"\"\"\t, num_inference_steps=2\t).images[0]\r\n\t\t\t\t\t\tassert image is not None\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nfrom math import logaa\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = \"base_exp.txt\"\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :float \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0\r\n\t\t\tfor i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__\t\t)\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= list(map(lowercase__\t\t\t\t,\t\t\t\t\tline.split(\"\"\",\"\"\"\t\t)\t\t)\t\t)\r\n\t\t\t\t\t\tif x * logaa(lowercase__\t\t) > largest:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= x * logaa(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= i + 1\r\n\t\t\treturn result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(solution())\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":607,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nimport shutil\r\nimport tempfile\r\nfrom unittest import TestCase\r\nfrom unittest.mock import patch\r\n\r\nimport numpy as np\r\nfrom datasets import Dataset\r\n\r\nfrom transformers.models.realm.configuration_realm import RealmConfig\r\nfrom transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever\r\nfrom transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tempfile.mkdtemp()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 5\r\n\r\n\t\t\t\t\t\t# Realm tok\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= [\r\n\t\t\t\t\t\t \"\"\"[UNK]\"\"\",\r\n\t\t\t\t\t\t \"\"\"[CLS]\"\"\",\r\n\t\t\t\t\t\t \"\"\"[SEP]\"\"\",\r\n\t\t\t\t\t\t \"\"\"[PAD]\"\"\",\r\n\t\t\t\t\t\t \"\"\"[MASK]\"\"\",\r\n\t\t\t\t\t\t \"\"\"test\"\"\",\r\n\t\t\t\t\t\t \"\"\"question\"\"\",\r\n\t\t\t\t\t\t \"\"\"this\"\"\",\r\n\t\t\t\t\t\t \"\"\"is\"\"\",\r\n\t\t\t\t\t\t \"\"\"the\"\"\",\r\n\t\t\t\t\t\t \"\"\"first\"\"\",\r\n\t\t\t\t\t\t \"\"\"second\"\"\",\r\n\t\t\t\t\t\t \"\"\"third\"\"\",\r\n\t\t\t\t\t\t \"\"\"fourth\"\"\",\r\n\t\t\t\t\t\t \"\"\"fifth\"\"\",\r\n\t\t\t\t\t\t \"\"\"record\"\"\",\r\n\t\t\t\t\t\t \"\"\"want\"\"\",\r\n\t\t\t\t\t\t \"\"\"##want\"\"\",\r\n\t\t\t\t\t\t \"\"\"##ed\"\"\",\r\n\t\t\t\t\t\t \"\"\"wa\"\"\",\r\n\t\t\t\t\t\t \"\"\"un\"\"\",\r\n\t\t\t\t\t\t 
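# Hedged usage sketch (not from the original file): the smallest end-to-end use
# of the pipeline exercised above, on CPU. The tiny test checkpoint (pulled
# from the Hugging Face Hub at runtime) produces 128x128 images in two steps.
if __name__ == "__main__":
    pipe = OnnxStableDiffusionPipeline.from_pretrained(
        "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
    )
    image = pipe("A painting of a squirrel eating a burger", num_inference_steps=2, output_type="np").images[0]
    print(image.shape)  # (128, 128, 3)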
\"\"\"runn\"\"\",\r\n\t\t\t\t\t\t \"\"\"##ing\"\"\",\r\n\t\t\t\t\t\t \"\"\",\"\"\",\r\n\t\t\t\t\t\t \"\"\"low\"\"\",\r\n\t\t\t\t\t\t \"\"\"lowest\"\"\",\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= os.path.join(self.tmpdirname\t, \"\"\"realm_tokenizer\"\"\"\t)\r\n\t\t\t\t\t\tos.makedirs(__A\t, exist_ok=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= os.path.join(__A\t, VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\t\t\t\t\t\twith open(self.vocab_file\t, \"\"\"w\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as vocab_writer:\r\n\t\t\t\t\t\t\t\t\tvocab_writer.write(\"\"\"\"\"\".join([x + \"\"\"\\n\"\"\" for x in vocab_tokens]\t)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= os.path.join(self.tmpdirname\t, \"\"\"realm_block_records\"\"\"\t)\r\n\t\t\t\t\t\tos.makedirs(__A\t, exist_ok=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tRealmTokenizer:\r\n\t\t\t\t\t\treturn RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname\t, \"\"\"realm_tokenizer\"\"\"\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tshutil.rmtree(self.tmpdirname\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= RealmConfig(num_block_records=self.num_block_records\t)\r\n\t\t\t\t\t\treturn config\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= Dataset.from_dict(\r\n\t\t\t\t\t\t {\r\n\t\t\t\t\t\t \"\"\"id\"\"\": [\"\"\"0\"\"\", \"\"\"1\"\"\"],\r\n\t\t\t\t\t\t \"\"\"question\"\"\": [\"\"\"foo\"\"\", \"\"\"bar\"\"\"],\r\n\t\t\t\t\t\t \"\"\"answers\"\"\": [[\"\"\"Foo\"\"\", \"\"\"Bar\"\"\"], [\"\"\"Bar\"\"\"]],\r\n\t\t\t\t\t\t }\t)\r\n\t\t\t\t\t\treturn dataset\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= np.array(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t b\"\"\"This is the first record\"\"\",\r\n\t\t\t\t\t\t b\"\"\"This is the second record\"\"\",\r\n\t\t\t\t\t\t b\"\"\"This is the third record\"\"\",\r\n\t\t\t\t\t\t b\"\"\"This is the fourth record\"\"\",\r\n\t\t\t\t\t\t b\"\"\"This is the fifth record\"\"\",\r\n\t\t\t\t\t\t b\"\"\"This is a longer longer longer record\"\"\",\r\n\t\t\t\t\t\t ]\t, dtype=__A\t, )\r\n\t\t\t\t\t\treturn block_records\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= RealmRetriever(\r\n\t\t\t\t\t\t block_records=self.get_dummy_block_records()\t, tokenizer=self.get_tokenizer()\t, )\r\n\t\t\t\t\t\treturn retriever\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.get_config()\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_dummy_retriever()\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= retriever.tokenizer\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= np.array([0, 3]\t, dtype=\"\"\"long\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tokenizer([\"\"\"Test question\"\"\"]\t).input_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer(\r\n\t\t\t\t\t\t [\"\"\"the fourth\"\"\"]\t, add_special_tokens=__A\t, return_token_type_ids=__A\t, return_attention_mask=__A\t, 
).input_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= config.reader_seq_len\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :str \t\t\t\t\t= retriever(\r\n\t\t\t\t\t\t __A\t, __A\t, answer_ids=__A\t, max_length=__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(len(__A\t)\t, 2\t)\r\n\t\t\t\t\t\tself.assertEqual(len(__A\t)\t, 2\t)\r\n\t\t\t\t\t\tself.assertEqual(len(__A\t)\t, 2\t)\r\n\t\t\t\t\t\tself.assertEqual(concat_inputs.input_ids.shape\t, (2, 10)\t)\r\n\t\t\t\t\t\tself.assertEqual(concat_inputs.attention_mask.shape\t, (2, 10)\t)\r\n\t\t\t\t\t\tself.assertEqual(concat_inputs.token_type_ids.shape\t, (2, 10)\t)\r\n\t\t\t\t\t\tself.assertEqual(concat_inputs.special_tokens_mask.shape\t, (2, 10)\t)\r\n\t\t\t\t\t\tself.assertEqual(\r\n\t\t\t\t\t\t tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]\t)\t, [\"\"\"[CLS]\"\"\", \"\"\"test\"\"\", \"\"\"question\"\"\", \"\"\"[SEP]\"\"\", \"\"\"this\"\"\", \"\"\"is\"\"\", \"\"\"the\"\"\", \"\"\"first\"\"\", \"\"\"record\"\"\", \"\"\"[SEP]\"\"\"]\t, )\r\n\t\t\t\t\t\tself.assertEqual(\r\n\t\t\t\t\t\t tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]\t)\t, [\"\"\"[CLS]\"\"\", \"\"\"test\"\"\", \"\"\"question\"\"\", \"\"\"[SEP]\"\"\", \"\"\"this\"\"\", \"\"\"is\"\"\", \"\"\"the\"\"\", \"\"\"fourth\"\"\", \"\"\"record\"\"\", \"\"\"[SEP]\"\"\"]\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_config()\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_retriever()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= retriever.tokenizer\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= np.array([0, 3, 5]\t, dtype=\"\"\"long\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer([\"\"\"Test question\"\"\"]\t).input_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tokenizer(\r\n\t\t\t\t\t\t [\"\"\"the fourth\"\"\", \"\"\"longer longer\"\"\"]\t, add_special_tokens=__A\t, return_token_type_ids=__A\t, return_attention_mask=__A\t, ).input_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= config.reader_seq_len\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= retriever(\r\n\t\t\t\t\t\t __A\t, __A\t, answer_ids=__A\t, max_length=__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual([False, True, True]\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]]\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]]\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_dummy_retriever()\r\n\t\t\t\t\t\tretriever.save_pretrained(os.path.join(self.tmpdirname\t, \"\"\"realm_block_records\"\"\"\t)\t)\r\n\r\n\t\t\t\t\t\t# Test local path\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= retriever.from_pretrained(os.path.join(self.tmpdirname\t, \"\"\"realm_block_records\"\"\"\t)\t)\r\n\t\t\t\t\t\tself.assertEqual(retriever.block_records[0]\t, b\"\"\"This is the first record\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Test mocked remote path\r\n\t\t\t\t\t\twith patch(\"\"\"transformers.models.realm.retrieval_realm.hf_hub_download\"\"\"\t) as mock_hf_hub_download:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 
os.path.join(\r\n\t\t\t\t\t\t\t\t\t os.path.join(self.tmpdirname\t, \"\"\"realm_block_records\"\"\"\t)\t, _REALM_BLOCK_RECORDS_FILENAME\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= RealmRetriever.from_pretrained(\"\"\"google/realm-cc-news-pretrained-openqa\"\"\"\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(retriever.block_records[0]\t, b\"\"\"This is the first record\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport itertools\r\nimport math\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif 1 < number < 4:\r\n\t\t\t\t\t\t# 2 and 3 are primes\r\n\t\t\t\t\t\treturn True\r\n\t\t\telif number < 2 or number % 2 == 0 or number % 3 == 0:\r\n\t\t\t\t\t\t# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t# All primes number are in format of 6k +/- 1\r\n\t\t\tfor i in range(5\t\t\t\t,\t\t\t\t\tint(math.sqrt(lowercase__\t\t) + 1\t\t)\t\t\t\t,\t\t\t\t\t6\t\t):\r\n\t\t\t\t\t\tif number % i == 0 or number % (i + 2) == 0:\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\treturn True\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Dict:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\twhile True:\r\n\t\t\t\t\t\tif is_prime(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tyield num\r\n\t\t\t\t\t\tnum += 1\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 1_0_0_0_1\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn next(itertools.islice(prime_generator()\t\t\t\t,\t\t\t\t\tnth - 1\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":608,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\nimport json\r\nimport os\r\n\r\nimport fairseq\r\nimport torch\r\nfrom fairseq.data import Dictionary\r\n\r\nfrom transformers import (\r\n UniSpeechConfig,\r\n UniSpeechForCTC,\r\n UniSpeechForPreTraining,\r\n WavaVecaFeatureExtractor,\r\n WavaVecaPhonemeCTCTokenizer,\r\n WavaVecaProcessor,\r\n logging,\r\n)\r\n\r\n\r\nlogging.set_verbosity_info()\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'post_extract_proj': 'feature_projection.projection',\r\n 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',\r\n 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',\r\n 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',\r\n 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',\r\n 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',\r\n 'self_attn_layer_norm': 'encoder.layers.*.layer_norm',\r\n 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',\r\n 'fc2': 'encoder.layers.*.feed_forward.output_dense',\r\n 'final_layer_norm': 'encoder.layers.*.final_layer_norm',\r\n 'encoder.layer_norm': 'encoder.layer_norm',\r\n 'w2v_model.layer_norm': 'feature_projection.layer_norm',\r\n 'quantizer.weight_proj': 'quantizer.weight_proj',\r\n 'quantizer.vars': 'quantizer.codevectors',\r\n 'project_q': 
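# Hedged note (not from the original file): the patch in test_save_load_pretrained
# swaps hf_hub_download for a stub whose return_value points at the local
# block-records file, so from_pretrained("google/realm-cc-news-pretrained-openqa")
# never touches the network and still loads the dummy records written in setUp.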
'project_q',\r\n 'final_proj': 'project_hid',\r\n 'w2v_encoder.proj': 'ctc_proj',\r\n 'mask_emb': 'masked_spec_embed',\r\n}\r\n__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n 'ctc_proj',\r\n 'quantizer.weight_proj',\r\n 'quantizer.codevectors',\r\n 'project_q',\r\n 'project_hid',\r\n]\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Any\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Any\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tfor attribute in key.split(\"\"\".\"\"\"\t\t):\r\n\t\t\t\t\t\tif is_finetuned:\r\n\t\t\t\t\t\t\t\t\tif attribute in [\"quantizer\", \"project_q\", \"project_hid\"]:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# those layers are only relevant for pretraining and should be dropped\r\n\t\t\t\t\t\t\t\t\t\t\t\treturn\r\n\r\n\t\t\t\t\t\t\t\t\tif attribute == \"ctc_proj\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"lm_head\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= getattr(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\tif weight_type is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= getattr(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t).shape\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= hf_pointer.shape\r\n\r\n\t\t\tassert hf_shape == value.shape, (\r\n\t\t\t f\"\"\"Shape of hf {key + \".\" + weight_type if weight_type is not None else \"\"} is {hf_shape}, but should be\"\"\"\r\n\t\t\t f\"\"\" {value.shape} for {full_name}\"\"\"\r\n\t\t\t)\r\n\r\n\t\t\tif weight_type == \"weight\":\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= value\r\n\t\t\telif weight_type == \"weight_g\":\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= value\r\n\t\t\telif weight_type == \"weight_v\":\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= value\r\n\t\t\telif weight_type == \"bias\":\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= value\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= value\r\n\r\n\t\t\tlogger.info(f\"\"\"{key + \".\" + weight_type if weight_type is not None else \"\"} was initialized from {full_name}.\"\"\"\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[Any]\t\t) -> Dict:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= []\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= fairseq_model.state_dict()\r\n\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= hf_model.unispeech.feature_extractor\r\n\r\n\t\t\tfor name, value in fairseq_dict.items():\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= False\r\n\t\t\t\t\t\tif \"conv_layers\" in name:\r\n\t\t\t\t\t\t\t\t\tload_conv_layer(\r\n\t\t\t\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\thf_model.config.feat_extract_norm == \"\"\"group\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tfor key, mapped_key in MAPPING.items():\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"unispeech.\"\"\" + mapped_key if mapped_key not 
in TOP_LEVEL_KEYS else mapped_key\r\n\t\t\t\t\t\t\t\t\t\t\t\tif key in name or key.split(\"\"\"w2v_model.\"\"\"\t\t)[-1] == name.split(\"\"\".\"\"\"\t\t)[0]:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= True\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif \"*\" in mapped_key:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= name.split(lowercase__\t\t)[0].split(\"\"\".\"\"\"\t\t)[-2]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= mapped_key.replace(\"\"\"*\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif \"weight_g\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"weight_g\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"weight_v\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"weight_v\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"bias\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"bias\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"weight\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# TODO: don't match quantizer.weight_proj\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"weight\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= None\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tset_recursively(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\tif not is_used:\r\n\t\t\t\t\t\t\t\t\tunused_weights.append(lowercase__\t\t)\r\n\r\n\t\t\tlogger.warning(f\"\"\"Unused weights: {unused_weights}\"\"\"\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Any\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str\t\t) -> Dict:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= full_name.split(\"\"\"conv_layers.\"\"\"\t\t)[-1]\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= name.split(\"\"\".\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= int(items[0]\t\t)\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= int(items[1]\t\t)\r\n\r\n\t\t\tif type_id == 0:\r\n\t\t\t\t\t\tif \"bias\" in name:\r\n\t\t\t\t\t\t\t\t\tassert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (\r\n\t\t\t\t\t\t\t\t\t f\"\"\"{full_name} has size {value.shape}, but\"\"\"\r\n\t\t\t\t\t\t\t\t\t f\"\"\" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.\"\"\"\r\n\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= value\r\n\t\t\t\t\t\t\t\t\tlogger.info(f\"\"\"Feat extract conv layer {layer_id} was initialized from {full_name}.\"\"\"\t\t)\r\n\t\t\t\t\t\telif \"weight\" in name:\r\n\t\t\t\t\t\t\t\t\tassert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (\r\n\t\t\t\t\t\t\t\t\t f\"\"\"{full_name} has size {value.shape}, but\"\"\"\r\n\t\t\t\t\t\t\t\t\t f\"\"\" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.\"\"\"\r\n\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= value\r\n\t\t\t\t\t\t\t\t\tlogger.info(f\"\"\"Feat extract conv layer {layer_id} was initialized from 
{full_name}.\"\"\"\t\t)\r\n\t\t\telif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):\r\n\t\t\t\t\t\tif \"bias\" in name:\r\n\t\t\t\t\t\t\t\t\tassert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (\r\n\t\t\t\t\t\t\t\t\t f\"\"\"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was\"\"\"\r\n\t\t\t\t\t\t\t\t\t \" found.\"\r\n\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= value\r\n\t\t\t\t\t\t\t\t\tlogger.info(f\"\"\"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.\"\"\"\t\t)\r\n\t\t\t\t\t\telif \"weight\" in name:\r\n\t\t\t\t\t\t\t\t\tassert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (\r\n\t\t\t\t\t\t\t\t\t f\"\"\"{full_name} has size {value.shape}, but\"\"\"\r\n\t\t\t\t\t\t\t\t\t f\"\"\" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.\"\"\"\r\n\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= value\r\n\t\t\t\t\t\t\t\t\tlogger.info(f\"\"\"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.\"\"\"\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tunused_weights.append(lowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n@torch.no_grad()\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Any\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple=None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int=None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[Any]=True\t\t) -> Any:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif config_path is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= UniSpeechConfig.from_pretrained(lowercase__\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= UniSpeechConfig()\r\n\r\n\t\t\tif is_finetuned:\r\n\t\t\t\t\t\tif dict_path:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= Dictionary.load_from_json(lowercase__\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# important change bos & pad token id since CTC symbol is and\r\n\t\t\t\t\t\t\t\t\t# not as in fairseq\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= target_dict.pad_index\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= target_dict.bos_index\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= target_dict.eos_index\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= len(target_dict.symbols\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= os.path.join(lowercase__\t\t\t\t,\t\t\t\t\t\"\"\"vocab.json\"\"\"\t\t)\r\n\t\t\t\t\t\t\t\t\tif not os.path.isdir(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.error(\"\"\"--pytorch_dump_folder_path ({}) should be a directory\"\"\".format(lowercase__\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\treturn\r\n\t\t\t\t\t\t\t\t\tos.makedirs(lowercase__\t\t\t\t,\t\t\t\t\texist_ok=lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= target_dict.indices\r\n\r\n\t\t\t\t\t\t\t\t\t# fairseq has the and switched\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 4_2\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 4_3\r\n\t\t\t\t\t\t\t\t\twith open(lowercase__\t\t\t\t,\t\t\t\t\t\"\"\"w\"\"\"\t\t\t\t,\t\t\t\t\tencoding=\"\"\"utf-8\"\"\"\t\t) as vocab_handle:\r\n\t\t\t\t\t\t\t\t\t\t\t\tjson.dump(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 
WavaVecaPhonemeCTCTokenizer(\r\n\t\t\t\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tunk_token=target_dict.unk_word\t\t\t\t,\t\t\t\t\tpad_token=target_dict.pad_word\t\t\t\t,\t\t\t\t\tbos_token=target_dict.bos_word\t\t\t\t,\t\t\t\t\teos_token=target_dict.eos_word\t\t\t\t,\t\t\t\t\tword_delimiter_token=\"\"\"|\"\"\"\t\t\t\t,\t\t\t\t\tdo_lower_case=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= True if config.feat_extract_norm == \"\"\"layer\"\"\" else False\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= WavaVecaFeatureExtractor(\r\n\t\t\t\t\t\t\t\t\t feature_size=1\t\t\t\t,\t\t\t\t\tsampling_rate=1_6_0_0_0\t\t\t\t,\t\t\t\t\tpadding_value=0\t\t\t\t,\t\t\t\t\tdo_normalize=lowercase__\t\t\t\t,\t\t\t\t\treturn_attention_mask=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= WavaVecaProcessor(feature_extractor=lowercase__\t\t\t\t,\t\t\t\t\ttokenizer=lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tprocessor.save_pretrained(lowercase__\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= UniSpeechForCTC(lowercase__\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= UniSpeechForPreTraining(lowercase__\t\t)\r\n\r\n\t\t\tif is_finetuned:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= fairseq.checkpoint_utils.load_model_ensemble_and_task(\r\n\t\t\t\t\t\t [checkpoint_path]\t\t\t\t,\t\t\t\t\targ_overrides={\"\"\"data\"\"\": \"\"\"/\"\"\".join(dict_path.split(\"\"\"/\"\"\"\t\t)[:-1]\t\t), \"\"\"w2v_path\"\"\": checkpoint_path}\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :int \t\t\t\t\t= fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model[0].eval()\r\n\r\n\t\t\trecursively_load_weights(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\thf_unispeech.save_pretrained(lowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= argparse.ArgumentParser()\r\n\t\t\t\tparser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')\r\n\t\t\t\tparser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')\r\n\t\t\t\tparser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')\r\n\t\t\t\tparser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')\r\n\t\t\t\tparser.add_argument(\r\n\t\t\t\t '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'\r\n\t\t\t\t)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= parser.parse_args()\r\n\t\t\t\tconvert_unispeech_checkpoint(\r\n\t\t\t\t args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned\r\n\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 5_0\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [1] * (length + 1)\r\n\r\n\t\t\tfor row_length in 
range(3\t\t\t\t,\t\t\t\t\tlength + 1\t\t):\r\n\t\t\t\t\t\tfor block_length in range(3\t\t\t\t,\t\t\t\t\trow_length + 1\t\t):\r\n\t\t\t\t\t\t\t\t\tfor block_start in range(row_length - block_length\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tways_number[row_length] += ways_number[\r\n\t\t\t\t\t\t\t\t\t\t\t\t row_length - block_start - block_length - 1\r\n\t\t\t\t\t\t\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t\t\t\tways_number[row_length] += 1\r\n\r\n\t\t\treturn ways_number[length]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":609,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport math\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> None:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= input(\"\"\"Enter message: \"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= int(input(f\"\"\"Enter key [2-{len(lowercase__\t\t) - 1}]: \"\"\"\t\t)\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= input(\"\"\"Encryption/Decryption [e/d]: \"\"\"\t\t)\r\n\r\n\t\t\tif mode.lower().startswith(\"\"\"e\"\"\"\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= encrypt_message(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\telif mode.lower().startswith(\"\"\"d\"\"\"\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= decrypt_message(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Append pipe symbol (vertical bar) to identify spaces at the end.\r\n\t\t\tprint(f\"\"\"Output:\\n{text + \"|\"}\"\"\"\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [\"\"\"\"\"\"] * key\r\n\t\t\tfor col in range(lowercase__\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= col\r\n\t\t\t\t\t\twhile pointer < len(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tcipher_text[col] += message[pointer]\r\n\t\t\t\t\t\t\t\t\tpointer += key\r\n\t\t\treturn \"\".join(lowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= math.ceil(len(lowercase__\t\t) / key\t\t)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= key\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= (num_cols * num_rows) - len(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [\"\"\"\"\"\"] * num_cols\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 0\r\n\r\n\t\t\tfor symbol in message:\r\n\t\t\t\t\t\tplain_text[col] += symbol\r\n\t\t\t\t\t\tcol += 1\r\n\r\n\t\t\t\t\t\tif (\r\n\t\t\t\t\t\t (col == num_cols)\r\n\t\t\t\t\t\t or (col == num_cols - 1)\r\n\t\t\t\t\t\t and (row >= num_rows - num_shaded_boxes)\r\n\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 0\r\n\t\t\t\t\t\t\t\t\trow += 1\r\n\r\n\t\t\treturn \"\".join(lowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple 
docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/\r\n\r\nimport gc\r\nimport random\r\nimport tempfile\r\nimport unittest\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom PIL import Image\r\nfrom transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer\r\n\r\nfrom diffusers import (\r\n AutoencoderKL,\r\n ControlNetModel,\r\n DDIMScheduler,\r\n StableDiffusionControlNetImgaImgPipeline,\r\n UNetaDConditionModel,\r\n)\r\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel\r\nfrom diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device\r\nfrom diffusers.utils.import_utils import is_xformers_available\r\nfrom diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu\r\n\r\nfrom ..pipeline_params import (\r\n IMAGE_TO_IMAGE_IMAGE_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_PARAMS,\r\n)\r\nfrom ..test_pipelines_common import (\r\n PipelineKarrasSchedulerTesterMixin,\r\n PipelineLatentTesterMixin,\r\n PipelineTesterMixin,\r\n)\r\n\r\n\r\nenable_full_determinism()\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS.union({\"control_image\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, 
intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= floats_tensor(control_image.shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :str 
\t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfrozenset([]\t\t\t\t\t\t\t) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\r\n\t\t\t\t\t\tdef init_weights(__A\t):\r\n\t\t\t\t\t\t\t\t\tif isinstance(__A\t, torch.nn.Convad\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\ttorch.nn.init.normal(m.weight\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tm.bias.data.fill_(1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= MultiControlNetModel([controlneta, controlneta]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn 
components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= floats_tensor(control_image[0].shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 1_0.0\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 4\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.1\t, control_guidance_end=0.2\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=[0.1, 0.3]\t, control_guidance_end=[0.2, 0.7]\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.4\t, control_guidance_end=[0.5, 0.8]\t)[0]\r\n\r\n\t\t\t\t\t\t# make sure that all outputs are 
different\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdir:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# save_pretrained is not implemented for Multi-ControlNet\r\n\t\t\t\t\t\t\t\t\t\t\t\tpipe.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\texcept NotImplementedError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n@slow\r\n@require_torch_gpu\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tsuper().tearDown()\r\n\t\t\t\t\t\tgc.collect()\r\n\t\t\t\t\t\ttorch.cuda.empty_cache()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= ControlNetModel.from_pretrained(\"\"\"lllyasviel/sd-controlnet-canny\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= StableDiffusionControlNetImgaImgPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, safety_checker=__A\t, controlnet=__A\t)\r\n\t\t\t\t\t\tpipe.enable_model_cpu_offload()\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.Generator(device=\"\"\"cpu\"\"\"\t).manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"evil space-punk bird\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png\"\"\"\t).resize((512, 512)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png\"\"\"\t).resize((512, 512)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(\r\n\t\t\t\t\t\t __A\t, __A\t, control_image=__A\t, generator=__A\t, output_type=\"\"\"np\"\"\"\t, num_inference_steps=50\t, strength=0.6\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= output.images[0]\r\n\r\n\t\t\t\t\t\tassert image.shape == (512, 512, 3)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= load_numpy(\r\n\t\t\t\t\t\t 
\"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy\"\"\"\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(expected_image - image\t).max() < 9E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":610,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import Optional\r\n\r\nfrom torch import nn\r\n\r\nfrom .transformer_ad import TransformeraDModel, TransformeraDModelOutput\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( nn.Module\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A = 16\t, __A = 88\t, __A = None\t, __A = 1\t, __A = 0.0\t, __A = 32\t, __A = None\t, __A = False\t, __A = None\t, __A = None\t, __A = \"geglu\"\t, __A = None\t, ) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tsuper().__init__()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= nn.ModuleList(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t TransformeraDModel(\r\n\t\t\t\t\t\t num_attention_heads=__A\t, attention_head_dim=__A\t, in_channels=__A\t, num_layers=__A\t, dropout=__A\t, norm_num_groups=__A\t, cross_attention_dim=__A\t, attention_bias=__A\t, sample_size=__A\t, num_vector_embeds=__A\t, activation_fn=__A\t, num_embeds_ada_norm=__A\t, )\r\n\t\t\t\t\t\t for _ in range(2\t)\r\n\t\t\t\t\t\t ]\t)\r\n\r\n\t\t\t\t\t\t# Variables that can be set by a pipeline:\r\n\r\n\t\t\t\t\t\t# The ratio of transformer1 to transformer2's output states to be combined during inference\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0.5\r\n\r\n\t\t\t\t\t\t# The shape of `encoder_hidden_states` is expected to be\r\n\t\t\t\t\t\t# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= [77, 257]\r\n\r\n\t\t\t\t\t\t# Which transformer to use to encode which condition.\r\n\t\t\t\t\t\t# E.g. 
`(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= [1, 0]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A=None\t, __A=None\t, __A=None\t, __A = True\t, ) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= hidden_states\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= []\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 0\r\n\t\t\t\t\t\t# attention_mask is not used yet\r\n\t\t\t\t\t\tfor i in range(2\t):\r\n\t\t\t\t\t\t\t\t\t# for each of the two transformers, pass the corresponding condition tokens\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.transformer_index_for_condition[i]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.transformers[transformer_index](\r\n\t\t\t\t\t\t\t\t\t __A\t, encoder_hidden_states=__A\t, timestep=__A\t, cross_attention_kwargs=__A\t, return_dict=__A\t, )[0]\r\n\t\t\t\t\t\t\t\t\tencoded_states.append(encoded_state - input_states\t)\r\n\t\t\t\t\t\t\t\t\ttokens_start += self.condition_lengths[i]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= output_states + input_states\r\n\r\n\t\t\t\t\t\tif not return_dict:\r\n\t\t\t\t\t\t\t\t\treturn (output_states,)\r\n\r\n\t\t\t\t\t\treturn TransformeraDModelOutput(sample=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import Optional\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom transformers import GPTaConfig, GPTaLMHeadModel\r\nfrom transformers.modeling_utils import ModuleUtilsMixin\r\n\r\nfrom ...configuration_utils import ConfigMixin, register_to_config\r\nfrom ...models import ModelMixin\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t[r\"h\\.\\d+\\.attn\\.bias\", r\"h\\.\\d+\\.attn\\.masked_bias\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@register_to_config\r\n\t\t\tdef __init__( self\t, __A\t, __A\t, __A = None\t, __A = 5_0257\t, __A = 1024\t, __A = 768\t, __A = 12\t, __A = 12\t, __A = None\t, __A = \"gelu_new\"\t, __A = 0.1\t, __A = 0.1\t, __A = 0.1\t, __A = 1E-5\t, __A = 0.0_2\t, __A = True\t, __A = True\t, __A = False\t, __A = False\t, ) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tsuper().__init__()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= prefix_length\r\n\r\n\t\t\t\t\t\tif prefix_inner_dim != n_embd and prefix_hidden_dim is None:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t f\"\"\"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and\"\"\"\r\n\t\t\t\t\t\t\t\t\t f\"\"\" `n_embd`: {n_embd} are not equal.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= prefix_inner_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= prefix_hidden_dim\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t nn.Linear(self.prefix_inner_dim\t, self.prefix_hidden_dim\t)\r\n\t\t\t\t\t\t if self.prefix_hidden_dim is not None\r\n\t\t\t\t\t\t else 
nn.Identity()\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= (\r\n\t\t\t\t\t\t nn.Linear(self.prefix_hidden_dim\t, __A\t) if self.prefix_hidden_dim is not None else nn.Identity()\r\n\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= GPTaConfig(\r\n\t\t\t\t\t\t vocab_size=__A\t, n_positions=__A\t, n_embd=__A\t, n_layer=__A\t, n_head=__A\t, n_inner=__A\t, activation_function=__A\t, resid_pdrop=__A\t, embd_pdrop=__A\t, attn_pdrop=__A\t, layer_norm_epsilon=__A\t, initializer_range=__A\t, scale_attn_weights=__A\t, use_cache=__A\t, scale_attn_by_inverse_layer_idx=__A\t, reorder_and_upcast_attn=__A\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= GPTaLMHeadModel(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A = None\t, __A = None\t, ) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.transformer.transformer.wte(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.encode_prefix(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.decode_prefix(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.cat((prefix_embeds, embedding_text)\t, dim=1\t)\r\n\r\n\t\t\t\t\t\tif labels is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_dummy_token(input_ids.shape[0]\t, input_ids.device\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.cat((dummy_token, input_ids)\t, dim=1\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.transformer(inputs_embeds=__A\t, labels=__A\t, attention_mask=__A\t)\r\n\t\t\t\t\t\tif self.prefix_hidden_dim is not None:\r\n\t\t\t\t\t\t\t\t\treturn out, hidden\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\treturn out\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\ttorch.Tensor:\r\n\t\t\t\t\t\treturn torch.zeros(__A\t, self.prefix_length\t, dtype=torch.intaa\t, device=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\treturn self.encode_prefix(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@torch.no_grad()\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.split(__A\t, 1\t, dim=0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= []\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor feature in features:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.decode_prefix(feature.to(__A\t)\t) # back to the clip feature\r\n\t\t\t\t\t\t\t\t\t# Only support beam search for now\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.generate_beam(\r\n\t\t\t\t\t\t\t\t\t input_embeds=__A\t, device=__A\t, eos_token_id=__A\t)\r\n\t\t\t\t\t\t\t\t\tgenerated_tokens.append(output_tokens[0]\t)\r\n\t\t\t\t\t\t\t\t\tgenerated_seq_lengths.append(seq_lengths[0]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.stack(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= torch.stack(__A\t)\r\n\t\t\t\t\t\treturn generated_tokens, generated_seq_lengths\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@torch.no_grad()\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t, __A=None\t, __A=None\t, __A = 5\t, __A = 67\t, __A = 1.0\t, __A = None\t, ) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= eos_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] 
\t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= torch.ones(__A\t, device=__A\t, dtype=torch.int\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.zeros(__A\t, device=__A\t, dtype=torch.bool\t)\r\n\r\n\t\t\t\t\t\tif input_embeds is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= input_embeds\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.transformer.transformer.wte(__A\t)\r\n\r\n\t\t\t\t\t\tfor i in range(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.transformer(inputs_embeds=__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= outputs.logits\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= logits[:, -1, :] / (temperature if temperature > 0 else 1.0)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= logits.softmax(-1\t).log()\r\n\r\n\t\t\t\t\t\t\t\t\tif scores is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= logits.topk(__A\t, -1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= generated.expand(__A\t, *generated.shape[1:]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[str] \t\t\t\t\t= next_tokens.permute(1\t, 0\t), scores.squeeze(0\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tif tokens is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= next_tokens\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokens.expand(__A\t, *tokens.shape[1:]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= torch.cat((tokens, next_tokens)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= -float(np.inf\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= scores[:, None] + logits\r\n\t\t\t\t\t\t\t\t\t\t\t\tseq_lengths[~is_stopped] += 1\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= scores_sum / seq_lengths[:, None]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scores_sum_average.view(-1\t).topk(__A\t, -1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= next_tokens // scores_sum.shape[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= seq_lengths[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= next_tokens % scores_sum.shape[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= next_tokens.unsqueeze(1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokens[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.cat((tokens, next_tokens)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= generated[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= scores_sum_average * seq_lengths\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= is_stopped[next_tokens_source]\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.transformer.transformer.wte(next_tokens.squeeze()\t).view(generated.shape[0]\t, 1\t, -1\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.cat((generated, next_token_embed)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= is_stopped + 
next_tokens.eq(__A\t).squeeze()\r\n\t\t\t\t\t\t\t\t\tif is_stopped.all():\r\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scores / seq_lengths\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= scores.argsort(descending=__A\t)\r\n\t\t\t\t\t\t# tokens tensors are already padded to max_seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= [tokens[i] for i in order]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= torch.stack(__A\t, dim=0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.tensor([seq_lengths[i] for i in order]\t, dtype=seq_lengths.dtype\t)\r\n\t\t\t\t\t\treturn output_texts, seq_lengths\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":611,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'configuration_mobilenet_v2': [\r\n 'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',\r\n 'MobileNetV2Config',\r\n 'MobileNetV2OnnxConfig',\r\n ],\r\n}\r\n\r\ntry:\r\n\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['MobileNetV2FeatureExtractor']\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['MobileNetV2ImageProcessor']\r\n\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'MobileNetV2ForImageClassification',\r\n\t\t\t\t 'MobileNetV2ForSemanticSegmentation',\r\n\t\t\t\t 'MobileNetV2Model',\r\n\t\t\t\t 'MobileNetV2PreTrainedModel',\r\n\t\t\t\t 'load_tf_weights_in_mobilenet_v2',\r\n\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom .configuration_mobilenet_va import (\r\n\t\t\t\t MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,\r\n\t\t\t\t MobileNetVaConfig,\r\n\t\t\t\t MobileNetVaOnnxConfig,\r\n\t\t\t\t)\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor\r\n\t\t\t\t\t\t\t\tfrom .image_processing_mobilenet_va import MobileNetVaImageProcessor\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_mobilenet_va import (\r\n\t\t\t\t\t\t\t\t MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t MobileNetVaForImageClassification,\r\n\t\t\t\t\t\t\t\t MobileNetVaForSemanticSegmentation,\r\n\t\t\t\t\t\t\t\t MobileNetVaModel,\r\n\t\t\t\t\t\t\t\t MobileNetVaPreTrainedModel,\r\n\t\t\t\t\t\t\t\t load_tf_weights_in_mobilenet_va,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, 
module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport copy\r\nfrom collections import OrderedDict\r\nfrom typing import Dict, Mapping\r\n\r\nfrom packaging import version\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...onnx import OnnxConfig\r\nfrom ...utils import logging\r\nfrom ..auto import CONFIG_MAPPING\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',\r\n # See all DETR models at https://huggingface.co/models?filter=detr\r\n}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t\"detr\"\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t[\"past_key_values\"]\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t{\r\n\t\t\t \"hidden_size\": \"d_model\",\r\n\t\t\t \"num_attention_heads\": \"encoder_attention_heads\",\r\n\t\t\t}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A=True\t, __A=None\t, __A=3\t, __A=100\t, __A=6\t, __A=2048\t, __A=8\t, __A=6\t, __A=2048\t, __A=8\t, __A=0.0\t, __A=0.0\t, __A=True\t, __A=\"relu\"\t, __A=256\t, __A=0.1\t, __A=0.0\t, __A=0.0\t, __A=0.0_2\t, __A=1.0\t, __A=False\t, __A=\"sine\"\t, __A=\"resnet50\"\t, __A=True\t, __A=False\t, __A=1\t, __A=5\t, __A=2\t, __A=1\t, __A=1\t, __A=5\t, __A=2\t, __A=0.1\t, **__A\t, ) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tif backbone_config is not None and use_timm_backbone:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"You can't specify both `backbone_config` and `use_timm_backbone`.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tif not use_timm_backbone:\r\n\t\t\t\t\t\t\t\t\tif backbone_config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CONFIG_MAPPING[\"\"\"resnet\"\"\"](out_features=[\"\"\"stage4\"\"\"]\t)\r\n\t\t\t\t\t\t\t\t\telif isinstance(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= backbone_config.get(\"\"\"model_type\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CONFIG_MAPPING[backbone_model_type]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= config_class.from_dict(__A\t)\r\n\t\t\t\t\t\t\t\t\t# set timm attributes to None\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= None, None, None\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= use_timm_backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= backbone_config\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= num_channels\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= num_queries\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= d_model\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= encoder_ffn_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= encoder_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= decoder_ffn_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= decoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= decoder_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= attention_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= activation_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= activation_function\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= init_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= init_xavier_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= encoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= decoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= auxiliary_loss\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= position_embedding_type\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= use_pretrained_backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= dilation\r\n\t\t\t\t\t\t# Hungarian matcher\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= class_cost\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= bbox_cost\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= giou_cost\r\n\t\t\t\t\t\t# Loss coefficients\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= mask_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= dice_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= bbox_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= giou_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= eos_coefficient\r\n\t\t\t\t\t\tsuper().__init__(is_encoder_decoder=__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self.encoder_attention_heads\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self.d_model\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@classmethod\r\n\t\t\tdef 
\t\t\t__lowerCAmelCase\t\t\t( cls\t, __A\t, **__A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn cls(backbone_config=__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict[str, any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= copy.deepcopy(self.__dict__\t)\r\n\t\t\t\t\t\tif output[\"backbone_config\"] is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.backbone_config.to_dict()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.__class__.model_type\r\n\t\t\t\t\t\treturn output\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\tversion.parse(\"1.11\"\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tMapping[str, Mapping[int, str]]:\r\n\t\t\t\t\t\treturn OrderedDict(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t (\"\"\"pixel_values\"\"\", {0: \"\"\"batch\"\"\", 1: \"\"\"num_channels\"\"\", 2: \"\"\"height\"\"\", 3: \"\"\"width\"\"\"}),\r\n\t\t\t\t\t\t (\"\"\"pixel_mask\"\"\", {0: \"\"\"batch\"\"\"}),\r\n\t\t\t\t\t\t ]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tfloat:\r\n\t\t\t\t\t\treturn 1E-5\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn 12\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":612,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport unittest\r\nfrom pathlib import Path\r\nfrom tempfile import TemporaryDirectory\r\n\r\nfrom transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available\r\nfrom transformers.models.gpta.tokenization_gpta import GPTaTokenizer\r\nfrom transformers.testing_utils import require_keras_nlp, require_tf, slow\r\n\r\n\r\nif is_tf_available():\r\n\t\t\t\timport tensorflow as tf\r\n\r\nif is_keras_nlp_available():\r\n\t\t\t\tfrom transformers.models.gpta import TFGPTaTokenizer\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= ['gpt2']\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 'gpt2'\r\n\r\nif is_tf_available():\r\n\r\n\r\n\r\n\r\n\t\t\t\tclass _SCREAMING_SNAKE_CASE ( tf.Module\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t\t\t\t\tsuper().__init__()\r\n\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer\r\n\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= TFGPTaLMHeadModel.from_config(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t@tf.function(input_signature=(tf.TensorSpec((None,)\t, tf.string\t, name=\"\"\"text\"\"\"\t),)\t)\r\n\t\t\t\t\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.tokenizer(__A\t)\r\n\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenized[\"\"\"input_ids\"\"\"].to_tensor()\r\n\r\n\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tf.cast(input_ids_dense > 0\t, tf.intaa\t)\r\n\t\t\t\t\t\t\t\t\t\t# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])\r\n\r\n\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.model(input_ids=__A\t, 
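# ---- editor's aside (illustrative sketch, not part of the dataset record above) ----
# The DETR config record above nests a backbone config object inside the main
# config and re-serializes it recursively in to_dict(). The same pattern,
# restated standalone; class and field names here are my own, not the
# transformers API:
import copy

class SubConfig:
    model_type = "resnet"

    def __init__(self, depth=50):
        self.depth = depth

    def to_dict(self):
        return {"model_type": self.model_type, "depth": self.depth}

class MainConfig:
    model_type = "detr"

    def __init__(self, backbone_config=None):
        self.backbone_config = backbone_config

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            # Recurse so the nested config serializes to plain data too.
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.model_type
        return output

assert MainConfig(SubConfig()).to_dict() == {
    "backbone_config": {"model_type": "resnet", "depth": 50},
    "model_type": "detr",
}
# ---- end editor's aside ----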
attention_mask=__A\t)[\"\"\"logits\"\"\"]\r\n\r\n\t\t\t\t\t\t\t\t\t\treturn outputs\r\n\r\n\r\n\r\n\r\n@require_tf\r\n@require_keras_nlp\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tsuper().setUp()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [GPTaTokenizer.from_pretrained(__A\t) for checkpoint in (TOKENIZER_CHECKPOINTS)]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= [TFGPTaTokenizer.from_pretrained(__A\t) for checkpoint in TOKENIZER_CHECKPOINTS]\r\n\t\t\t\t\t\tassert len(self.tokenizers\t) == len(self.tf_tokenizers\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [\r\n\t\t\t\t\t\t \"\"\"This is a straightforward English test sentence.\"\"\",\r\n\t\t\t\t\t\t \"\"\"This one has some weird characters\\rto\\nsee\\r\\nif those\\u00E9break things.\"\"\",\r\n\t\t\t\t\t\t \"\"\"Now we're going to add some Chinese: ไธ€ ไบŒ ไธ‰ ไธ€ไบŒไธ‰\"\"\",\r\n\t\t\t\t\t\t \"\"\"And some much more rare Chinese: ้ฝ‰ ๅ ƒ ้ฝ‰ๅ ƒ\"\"\",\r\n\t\t\t\t\t\t \"\"\"Je vais aussi รฉcrire en franรงais pour tester les accents\"\"\",\r\n\t\t\t\t\t\t \"\"\"Classical Irish also has some unusual characters, so in they go: Gaelaฤ‹, ๊ผ\"\"\",\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= list(zip(self.test_sentences\t, self.test_sentences[::-1]\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tfor tokenizer, tf_tokenizer in zip(self.tokenizers\t, self.tf_tokenizers\t):\r\n\t\t\t\t\t\t\t\t\tfor test_inputs in self.test_sentences:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer([test_inputs]\t, return_tensors=\"\"\"tf\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tf_tokenizer([test_inputs]\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tfor key in python_outputs.keys():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# convert them to numpy to avoid messing with ragged tensors\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= python_outputs[key].numpy()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tf_outputs[key].numpy()\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape\t)\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(tf.cast(__A\t, tf.intaa\t) == tf_outputs_values\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tfor tf_tokenizer in self.tf_tokenizers:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tf.function(__A\t)\r\n\t\t\t\t\t\t\t\t\tfor test_inputs in self.test_sentences:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tf.constant(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= compiled_tokenizer(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tf_tokenizer(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tfor key in eager_outputs.keys():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tfor tf_tokenizer in self.tf_tokenizers:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ModelToSave(tokenizer=__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] 
\t\t\t\t\t= tf.convert_to_tensor([self.test_sentences[0]]\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model.serving(__A\t) # Build model with some sample inputs\r\n\t\t\t\t\t\t\t\t\twith TemporaryDirectory() as tempdir:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= Path(__A\t) / \"\"\"saved.model\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\ttf.saved_model.save(__A\t, __A\t, signatures={\"\"\"serving_default\"\"\": model.serving}\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tf.saved_model.load(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= loaded_model.signatures[\"\"\"serving_default\"\"\"](__A\t)[\"\"\"output_0\"\"\"]\r\n\t\t\t\t\t\t\t\t\t# We may see small differences because the loaded model is compiled, so we need an epsilon for the test\r\n\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(out == loaded_output\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tfor tf_tokenizer in self.tf_tokenizers:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tf.convert_to_tensor([self.test_sentences[0]]\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tf_tokenizer(__A\t) # Build model with some sample inputs\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tf_tokenizer.get_config()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= TFGPTaTokenizer.from_config(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= model_from_config(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\tfor key in from_config_output.keys():\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(from_config_output[key] == out[key]\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tfor tf_tokenizer in self.tf_tokenizers:\r\n\t\t\t\t\t\t\t\t\t# for the test to run\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 12_3123\r\n\r\n\t\t\t\t\t\t\t\t\tfor max_length in [3, 5, 1024]:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tf.convert_to_tensor([self.test_sentences[0]]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tf_tokenizer(__A\t, max_length=__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= out[\"\"\"input_ids\"\"\"].numpy().shape[1]\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tassert out_length == max_length\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import (\r\n OptionalDependencyNotAvailable,\r\n _LazyModule,\r\n is_tf_available,\r\n is_torch_available,\r\n is_vision_available,\r\n)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}\r\n\r\ntry:\r\n\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['DeiTFeatureExtractor']\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['DeiTImageProcessor']\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 
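# ---- editor's aside (illustrative sketch, not part of the dataset record above) ----
# The tokenizer test record above exports a tf.Module as a SavedModel with an
# explicit serving signature and checks that reloading round-trips. The export
# pattern on its own, with a trivial stand-in module (names are my own); the
# "output_0" key for a single unnamed output matches what the record relies on:
import tempfile

import tensorflow as tf

class Doubler(tf.Module):
    @tf.function(input_signature=(tf.TensorSpec((None,), tf.float32, name="x"),))
    def serving(self, x):
        return x * 2.0

module = Doubler()
with tempfile.TemporaryDirectory() as tmpdir:
    tf.saved_model.save(module, tmpdir, signatures={"serving_default": module.serving})
    loaded = tf.saved_model.load(tmpdir)
    out = loaded.signatures["serving_default"](tf.constant([1.0, 2.0]))
    assert out["output_0"].numpy().tolist() == [2.0, 4.0]
# ---- end editor's aside ----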
'DeiTForImageClassification',\r\n\t\t\t\t 'DeiTForImageClassificationWithTeacher',\r\n\t\t\t\t 'DeiTForMaskedImageModeling',\r\n\t\t\t\t 'DeiTModel',\r\n\t\t\t\t 'DeiTPreTrainedModel',\r\n\t\t\t\t]\r\n\r\ntry:\r\n\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'TFDeiTForImageClassification',\r\n\t\t\t\t 'TFDeiTForImageClassificationWithTeacher',\r\n\t\t\t\t 'TFDeiTForMaskedImageModeling',\r\n\t\t\t\t 'TFDeiTModel',\r\n\t\t\t\t 'TFDeiTPreTrainedModel',\r\n\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .feature_extraction_deit import DeiTFeatureExtractor\r\n\t\t\t\t\t\t\t\tfrom .image_processing_deit import DeiTImageProcessor\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_deit import (\r\n\t\t\t\t\t\t\t\t DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t DeiTForImageClassification,\r\n\t\t\t\t\t\t\t\t DeiTForImageClassificationWithTeacher,\r\n\t\t\t\t\t\t\t\t DeiTForMaskedImageModeling,\r\n\t\t\t\t\t\t\t\t DeiTModel,\r\n\t\t\t\t\t\t\t\t DeiTPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_tf_deit import (\r\n\t\t\t\t\t\t\t\t TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t TFDeiTForImageClassification,\r\n\t\t\t\t\t\t\t\t TFDeiTForImageClassificationWithTeacher,\r\n\t\t\t\t\t\t\t\t TFDeiTForMaskedImageModeling,\r\n\t\t\t\t\t\t\t\t TFDeiTModel,\r\n\t\t\t\t\t\t\t\t TFDeiTPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":613,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom collections import deque\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :list[dict] \t\t\t\t\t= []\r\n\t\t\t\t\t\tself.adlist.append(\r\n\t\t\t\t\t\t {\"\"\"value\"\"\": \"\"\"\"\"\", \"\"\"next_states\"\"\": [], \"\"\"fail_state\"\"\": 0, \"\"\"output\"\"\": []}\t)\r\n\r\n\t\t\t\t\t\tfor keyword in keywords:\r\n\t\t\t\t\t\t\t\t\tself.add_keyword(__A\t)\r\n\t\t\t\t\t\tself.set_fail_transitions()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tint | None:\r\n\t\t\t\t\t\tfor state in 
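# ---- editor's aside (illustrative sketch, not part of the dataset record above) ----
# The import files in these records all follow the same lazy-loading pattern:
# declare an _import_structure mapping, import eagerly only under TYPE_CHECKING,
# and otherwise swap the module for a proxy that imports on first attribute
# access. A minimal proxy showing the mechanism (this is my own toy, not the
# real transformers _LazyModule):
import importlib
import types

class LazyProxy(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name):
        # Only reached for attributes not already set, i.e. unimported symbols.
        module = importlib.import_module(self._symbol_to_module[name])
        return getattr(module, name)

proxy = LazyProxy("fake_pkg", {"json": ["dumps"], "math": ["sqrt"]})
assert proxy.sqrt(9.0) == 3.0  # 'math' is imported only at this point
# ---- end editor's aside ----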
self.adlist[current_state][\"next_states\"]:\r\n\t\t\t\t\t\t\t\t\tif char == self.adlist[state][\"value\"]:\r\n\t\t\t\t\t\t\t\t\t\t\t\treturn state\r\n\t\t\t\t\t\treturn None\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\t\t\t\t\t\tfor character in keyword:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.find_next_state(__A\t, __A\t)\r\n\t\t\t\t\t\t\t\t\tif next_state is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.adlist.append(\r\n\t\t\t\t\t\t\t\t\t\t\t\t {\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"value\"\"\": character,\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"next_states\"\"\": [],\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"fail_state\"\"\": 0,\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"output\"\"\": [],\r\n\t\t\t\t\t\t\t\t\t\t\t\t }\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.adlist[current_state][\"next_states\"].append(len(self.adlist\t) - 1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= len(self.adlist\t) - 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= next_state\r\n\t\t\t\t\t\tself.adlist[current_state][\"output\"].append(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\tlowerCAmelCase_ :deque \t\t\t\t\t= deque()\r\n\t\t\t\t\t\tfor node in self.adlist[0][\"next_states\"]:\r\n\t\t\t\t\t\t\t\t\tq.append(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 0\r\n\t\t\t\t\t\twhile q:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= q.popleft()\r\n\t\t\t\t\t\t\t\t\tfor child in self.adlist[r][\"next_states\"]:\r\n\t\t\t\t\t\t\t\t\t\t\t\tq.append(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.adlist[r][\"\"\"fail_state\"\"\"]\r\n\t\t\t\t\t\t\t\t\t\t\t\twhile (\r\n\t\t\t\t\t\t\t\t\t\t\t\t self.find_next_state(__A\t, self.adlist[child][\"\"\"value\"\"\"]\t) is None\r\n\t\t\t\t\t\t\t\t\t\t\t\t and state != 0\r\n\t\t\t\t\t\t\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.adlist[state][\"\"\"fail_state\"\"\"]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.find_next_state(\r\n\t\t\t\t\t\t\t\t\t\t\t\t __A\t, self.adlist[child][\"\"\"value\"\"\"]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tif self.adlist[child][\"fail_state\"] is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 0\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (\r\n\t\t\t\t\t\t\t\t\t\t\t\t self.adlist[child][\"\"\"output\"\"\"]\r\n\t\t\t\t\t\t\t\t\t\t\t\t + self.adlist[self.adlist[child][\"\"\"fail_state\"\"\"]][\"\"\"output\"\"\"]\r\n\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tdict[str, list[int]]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :dict \t\t\t\t\t= {} # returns a dict with keywords and list of its occurrences\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0\r\n\t\t\t\t\t\tfor i in range(len(__A\t)\t):\r\n\t\t\t\t\t\t\t\t\twhile (\r\n\t\t\t\t\t\t\t\t\t self.find_next_state(__A\t, string[i]\t) is None\r\n\t\t\t\t\t\t\t\t\t and current_state != 0\r\n\t\t\t\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.adlist[current_state][\"\"\"fail_state\"\"\"]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.find_next_state(__A\t, string[i]\t)\r\n\t\t\t\t\t\t\t\t\tif next_state is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] 
\t\t\t\t\t= 0\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= next_state\r\n\t\t\t\t\t\t\t\t\t\t\t\tfor key in self.adlist[current_state][\"output\"]:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif key not in result:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= []\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult[key].append(i - len(__A\t) + 1\t)\r\n\t\t\t\t\t\treturn result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'configuration_squeezebert': [\r\n 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',\r\n 'SqueezeBertConfig',\r\n 'SqueezeBertOnnxConfig',\r\n ],\r\n 'tokenization_squeezebert': ['SqueezeBertTokenizer'],\r\n}\r\n\r\ntry:\r\n\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['SqueezeBertTokenizerFast']\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'SqueezeBertForMaskedLM',\r\n\t\t\t\t 'SqueezeBertForMultipleChoice',\r\n\t\t\t\t 'SqueezeBertForQuestionAnswering',\r\n\t\t\t\t 'SqueezeBertForSequenceClassification',\r\n\t\t\t\t 'SqueezeBertForTokenClassification',\r\n\t\t\t\t 'SqueezeBertModel',\r\n\t\t\t\t 'SqueezeBertModule',\r\n\t\t\t\t 'SqueezeBertPreTrainedModel',\r\n\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom .configuration_squeezebert import (\r\n\t\t\t\t SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\r\n\t\t\t\t SqueezeBertConfig,\r\n\t\t\t\t SqueezeBertOnnxConfig,\r\n\t\t\t\t)\r\n\t\t\t\tfrom .tokenization_squeezebert import SqueezeBertTokenizer\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .tokenization_squeezebert_fast import SqueezeBertTokenizerFast\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_squeezebert import (\r\n\t\t\t\t\t\t\t\t SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t SqueezeBertForMaskedLM,\r\n\t\t\t\t\t\t\t\t SqueezeBertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t SqueezeBertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t SqueezeBertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t SqueezeBertForTokenClassification,\r\n\t\t\t\t\t\t\t\t SqueezeBertModel,\r\n\t\t\t\t\t\t\t\t SqueezeBertModule,\r\n\t\t\t\t\t\t\t\t SqueezeBertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, 
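# ---- editor's aside (illustrative sketch, not part of the dataset record above) ----
# The record above encodes an Aho-Corasick automaton: a trie over the keywords
# plus "fail" links, so a single pass over the text finds every keyword
# occurrence. The same algorithm restated self-contained, with readable names
# of my own choosing (the record's locals are mangled):
from collections import deque

def aho_corasick(keywords, text):
    # One node per trie state: char value, child ids, fail link, matched keywords.
    nodes = [{"value": "", "next": [], "fail": 0, "out": []}]

    def goto(state, char):
        # Follow the trie edge labelled `char` out of `state`, if any.
        for s in nodes[state]["next"]:
            if nodes[s]["value"] == char:
                return s
        return None

    for kw in keywords:  # phase 1: build the trie
        cur = 0
        for ch in kw:
            nxt = goto(cur, ch)
            if nxt is None:
                nodes.append({"value": ch, "next": [], "fail": 0, "out": []})
                nodes[cur]["next"].append(len(nodes) - 1)
                nxt = len(nodes) - 1
            cur = nxt
        nodes[cur]["out"].append(kw)

    q = deque(nodes[0]["next"])  # phase 2: BFS to set fail links
    while q:
        r = q.popleft()
        for child in nodes[r]["next"]:
            q.append(child)
            state = nodes[r]["fail"]
            while goto(state, nodes[child]["value"]) is None and state != 0:
                state = nodes[state]["fail"]
            nodes[child]["fail"] = goto(state, nodes[child]["value"]) or 0
            nodes[child]["out"] += nodes[nodes[child]["fail"]]["out"]

    result, cur = {}, 0  # phase 3: single pass over the text
    for i, ch in enumerate(text):
        while goto(cur, ch) is None and cur != 0:
            cur = nodes[cur]["fail"]
        cur = goto(cur, ch) or 0
        for kw in nodes[cur]["out"]:
            result.setdefault(kw, []).append(i - len(kw) + 1)
    return result

assert aho_corasick(["he", "she", "his"], "ahishers") == {"his": [1], "she": [3], "he": [4]}
# ---- end editor's aside ----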
module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":614,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not isinstance(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t):\r\n\t\t\t\t\t\traise ValueError(\"\"\"Input series is not valid, valid series - [2, 4, 6]\"\"\"\t\t)\r\n\t\t\tif len(lowercase__\t\t) == 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Input list must be a non empty list\"\"\"\t\t)\r\n\t\t\tif len(lowercase__\t\t) == 1:\r\n\t\t\t\t\t\treturn True\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= series[1] - series[0]\r\n\t\t\tfor index in range(len(lowercase__\t\t) - 1\t\t):\r\n\t\t\t\t\t\tif series[index + 1] - series[index] != common_diff:\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\treturn True\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t) -> float:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not isinstance(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t):\r\n\t\t\t\t\t\traise ValueError(\"\"\"Input series is not valid, valid series - [2, 4, 6]\"\"\"\t\t)\r\n\t\t\tif len(lowercase__\t\t) == 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Input list must be a non empty list\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 0\r\n\t\t\tfor val in series:\r\n\t\t\t\t\t\tanswer += val\r\n\t\t\treturn answer / len(lowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 2_56\r\n# Modulus to hash a string\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1_00_00_03\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= len(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= len(lowercase__\t\t)\r\n\t\t\tif p_len > t_len:\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 1\r\n\r\n\t\t\t# Calculating the hash of pattern and substring of text\r\n\t\t\tfor i in range(lowercase__\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= (ord(pattern[i]\t\t) + p_hash * alphabet_size) % modulus\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (ord(text[i]\t\t) + text_hash * alphabet_size) % modulus\r\n\t\t\t\t\t\tif i == p_len - 1:\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= (modulus_power * alphabet_size) % modulus\r\n\r\n\t\t\tfor i in range(0\t\t\t\t,\t\t\t\t\tt_len - p_len + 1\t\t):\r\n\t\t\t\t\t\tif text_hash == p_hash and text[i : i + p_len] == pattern:\r\n\t\t\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\t\tif i == t_len - p_len:\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t# Calculate the https://en.wikipedia.org/wiki/Rolling_hash\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (\r\n\t\t\t\t\t\t (text_hash - ord(text[i]\t\t) * modulus_power) * alphabet_size\r\n\t\t\t\t\t\t + ord(text[i + 
p_len]\t\t)\r\n\t\t\t\t\t\t) % modulus\r\n\t\t\treturn False\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> None:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"abc1abc12\"\"\"\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"alskfjaldsabc1abc1abc12k23adsfabcabc\"\"\"\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"alskfjaldsk23adsfabcabc\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t) and not rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 2)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"ABABX\"\"\"\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"ABABZABABYABABX\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 3)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"AAAB\"\"\"\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= \"\"\"ABAAAAAB\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 4)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"abcdabcy\"\"\"\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"abcxabcdabxabcdabcdabcy\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 5)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lรผ\"\"\"\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lรผsai\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lue\"\"\"\r\n\t\t\tassert not rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\tprint(\"\"\"Success.\"\"\"\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\ttest_rabin_karp()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":615,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom transformers import BlipTextConfig\r\nfrom transformers.testing_utils import require_tf, slow\r\nfrom transformers.utils import is_tf_available\r\n\r\nfrom ...test_configuration_common import ConfigTester\r\nfrom ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask\r\n\r\n\r\nif is_tf_available():\r\n\t\t\t\timport tensorflow as tf\r\n\r\n\t\t\t\tfrom transformers import TFBlipTextModel\r\n\t\t\t\tfrom transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=12\t, __A=7\t, __A=True\t, __A=True\t, __A=True\t, __A=99\t, __A=32\t, __A=32\t, __A=2\t, __A=4\t, __A=37\t, __A=0.1\t, __A=0.1\t, __A=512\t, __A=0.0_2\t, __A=0\t, __A=None\t, ) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= parent\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= batch_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= is_training\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= use_input_mask\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= use_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= vocab_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= hidden_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= projection_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ 
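# ---- editor's aside (illustrative sketch, not part of the dataset record above) ----
# The Rabin-Karp record above hashes each window as a base-256 polynomial
# modulo a prime and rolls the window in O(1): subtract the leading
# character's contribution, shift by the base, add the new character. A tiny
# numeric check of that update rule, using the record's constants:
alphabet_size, modulus = 256, 1_000_003
text, p_len = "abcd", 3

def window_hash(window):
    h = 0
    for ch in window:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h

# Weight of the leading character; the record accumulates the same value
# incrementally inside its first loop.
modulus_power = pow(alphabet_size, p_len - 1, modulus)

h = window_hash(text[:p_len])  # hash of "abc"
h = ((h - ord(text[0]) * modulus_power) * alphabet_size + ord(text[p_len])) % modulus
assert h == window_hash(text[1 : 1 + p_len])  # rolled hash equals hash of "bcd"
# ---- end editor's aside ----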
:Optional[Any] \t\t\t\t\t= num_hidden_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= num_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= intermediate_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= attention_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= max_position_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= initializer_range\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= scope\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= bos_token_id\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.vocab_size\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= None\r\n\t\t\t\t\t\tif self.use_input_mask:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= random_attention_mask([self.batch_size, self.seq_length]\t)\r\n\r\n\t\t\t\t\t\tif input_mask is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= input_mask.numpy()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= input_mask.shape\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= np.random.randint(1\t, seq_length - 1\t, size=(batch_size,)\t)\r\n\t\t\t\t\t\t\t\t\tfor batch_idx, start_index in enumerate(__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 1\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 0\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_config()\r\n\r\n\t\t\t\t\t\treturn config, input_ids, tf.convert_to_tensor(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn BlipTextConfig(\r\n\t\t\t\t\t\t vocab_size=self.vocab_size\t, hidden_size=self.hidden_size\t, projection_dim=self.projection_dim\t, num_hidden_layers=self.num_hidden_layers\t, num_attention_heads=self.num_attention_heads\t, intermediate_size=self.intermediate_size\t, dropout=self.dropout\t, attention_dropout=self.attention_dropout\t, max_position_embeddings=self.max_position_embeddings\t, initializer_range=self.initializer_range\t, bos_token_id=self.bos_token_id\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= TFBlipTextModel(config=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model(__A\t, attention_mask=__A\t, training=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= model(__A\t, training=__A\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.last_hidden_state.shape\t, (self.batch_size, self.seq_length, self.hidden_size)\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.pooler_output.shape\t, (self.batch_size, self.hidden_size)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.prepare_config_and_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :int \t\t\t\t\t= config_and_inputs\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\"\"\"input_ids\"\"\": input_ids, \"\"\"attention_mask\"\"\": input_mask}\r\n\t\t\t\t\t\treturn config, inputs_dict\r\n\r\n\r\n\r\n\r\n@require_tf\r\nclass _SCREAMING_SNAKE_CASE ( A__ 
,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\t(TFBlipTextModel,) if is_tf_available() else ()\r\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tFalse\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= BlipTextModelTester(self\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= ConfigTester(self\t, config_class=__A\t, hidden_size=37\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tself.config_tester.run_common_tests()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_model(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skip(reason=\"\"\"Blip does not use inputs_embeds\"\"\"\t)\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skip(reason=\"\"\"BlipTextModel has no base class and is not available in MODEL_MAPPING\"\"\"\t)\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skip(reason=\"\"\"BlipTextModel has no base class and is not available in MODEL_MAPPING\"\"\"\t)\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tfor model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= TFBlipTextModel.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertIsNotNone(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=True\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tsuper().test_pt_tf_model_equivalence(allow_missing_keys=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\nimport os\r\n\r\nimport evaluate\r\nimport torch\r\nfrom datasets import load_dataset\r\nfrom torch.optim import AdamW\r\nfrom torch.utils.data import DataLoader\r\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\r\n\r\nfrom accelerate import Accelerator, DistributedType\r\nfrom accelerate.local_sgd import LocalSGD\r\n\r\n\r\n########################################################################\r\n# This is a fully working simple example to use Accelerate\r\n# with LocalSGD, which is a method to synchronize model\r\n# parameters every K batches. 
It is different, but complementary\r\n# to gradient accumulation.\r\n#\r\n# This example trains a Bert base model on GLUE MRPC\r\n# in any of the following settings (with the same script):\r\n# - single CPU or single GPU\r\n# - multi GPUS (using PyTorch distributed mode)\r\n# - (multi) TPUs\r\n# - fp16 (mixed-precision) or fp32 (normal precision)\r\n#\r\n# To run it in each of these various modes, follow the instructions\r\n# in the readme for examples:\r\n# https://github.com/huggingface/accelerate/tree/main/examples\r\n#\r\n########################################################################\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 16\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 32\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Accelerator\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 1_6\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoTokenizer.from_pretrained(\"\"\"bert-base-cased\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= load_dataset(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tdef tokenize_function(lowercase__\t\t: int\t\t):\r\n\t\t\t\t\t\t# max_length=None => use the model max length (it's actually the default)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tokenizer(examples[\"\"\"sentence1\"\"\"]\t\t\t\t,\t\t\t\t\texamples[\"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\ttruncation=lowercase__\t\t\t\t,\t\t\t\t\tmax_length=lowercase__\t\t)\r\n\t\t\t\t\t\treturn outputs\r\n\r\n\t\t\t# Apply the method we just defined to all the examples in all the splits of the dataset\r\n\t\t\t# starting with the main process first:\r\n\t\t\twith accelerator.main_process_first():\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= datasets.map(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tbatched=lowercase__\t\t\t\t,\t\t\t\t\tremove_columns=[\"\"\"idx\"\"\", \"\"\"sentence1\"\"\", \"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\r\n\t\t\t# transformers library\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenized_datasets.rename_column(\"\"\"label\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"labels\"\"\"\t\t)\r\n\r\n\t\t\tdef collate_fn(lowercase__\t\t: Dict\t\t):\r\n\t\t\t\t\t\t# On TPU it's best to pad everything to the same length or training will be very slow.\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None\r\n\t\t\t\t\t\t# When using mixed precision we want round multiples of 8/16\r\n\t\t\t\t\t\tif accelerator.mixed_precision == \"fp8\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 1_6\r\n\t\t\t\t\t\telif accelerator.mixed_precision != \"no\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 8\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= None\r\n\r\n\t\t\t\t\t\treturn tokenizer.pad(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"longest\"\"\"\t\t\t\t,\t\t\t\t\tmax_length=lowercase__\t\t\t\t,\t\t\t\t\tpad_to_multiple_of=lowercase__\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Instantiate dataloaders.\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"train\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ 
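# ---- editor's aside (illustrative sketch, not part of the dataset record above) ----
# The comment block above describes LocalSGD: each worker takes K local
# optimizer steps, then all workers average their *parameters* (rather than
# averaging gradients on every step). The synchronization step itself, sketched
# with plain PyTorch on two in-process stand-in replicas -- no process group,
# so this only illustrates the averaging that the periodic sync performs:
import torch

replicas = [torch.nn.Linear(4, 2) for _ in range(2)]  # stand-ins for per-rank models

def localsgd_sync(models):
    # Parameter averaging: every parameter becomes the mean across replicas.
    with torch.no_grad():
        for params in zip(*(m.parameters() for m in models)):
            mean = torch.stack([p.data for p in params]).mean(dim=0)
            for p in params:
                p.data.copy_(mean)

localsgd_sync(replicas)
for p0, p1 in zip(replicas[0].parameters(), replicas[1].parameters()):
    assert torch.equal(p0, p1)  # replicas agree after the sync point
# ---- end editor's aside ----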
:List[Any] \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"validation\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\r\n\t\t\treturn train_dataloader, eval_dataloader\r\n\r\n\r\n# For testing only\r\nif os.environ.get('TESTING_MOCKED_DATALOADERS', None) == \"1\":\r\n\t\t\t\tfrom accelerate.test_utils.training import mocked_dataloaders\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= mocked_dataloaders # noqa: F811\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[int]\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif os.environ.get(\"\"\"TESTING_MOCKED_DATALOADERS\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t) == \"1\":\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 2\r\n\t\t\t# New Code #\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= int(args.gradient_accumulation_steps\t\t)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= int(args.local_sgd_steps\t\t)\r\n\t\t\t# Initialize accelerator\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= Accelerator(\r\n\t\t\t cpu=args.cpu\t\t\t\t,\t\t\t\t\tmixed_precision=args.mixed_precision\t\t\t\t,\t\t\t\t\tgradient_accumulation_steps=lowercase__\t\t)\r\n\t\t\tif accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\r\n\t\t\t\t\t\traise NotImplementedError(\"\"\"LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)\"\"\"\t\t)\r\n\t\t\t# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config[\"\"\"lr\"\"\"]\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"num_epochs\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= int(config[\"\"\"seed\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"batch_size\"\"\"]\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= evaluate.load(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tset_seed(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= get_dataloaders(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t# Instantiate the model (we build the model here so that the seed also control new weights initialization)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoModelForSequenceClassification.from_pretrained(\"\"\"bert-base-cased\"\"\"\t\t\t\t,\t\t\t\t\treturn_dict=lowercase__\t\t)\r\n\r\n\t\t\t# We could avoid this line since the accelerator is set with `device_placement=True` (default value).\r\n\t\t\t# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\r\n\t\t\t# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= model.to(accelerator.device\t\t)\r\n\r\n\t\t\t# Instantiate optimizer\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= AdamW(params=model.parameters()\t\t\t\t,\t\t\t\t\tlr=lowercase__\t\t)\r\n\r\n\t\t\t# Instantiate scheduler\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= get_linear_schedule_with_warmup(\r\n\t\t\t optimizer=lowercase__\t\t\t\t,\t\t\t\t\tnum_warmup_steps=1_0_0\t\t\t\t,\t\t\t\t\tnum_training_steps=(len(lowercase__\t\t) * num_epochs)\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Prepare everything\r\n\t\t\t# There is 
no specific order to remember, we just need to unpack the objects in the same order we gave them to the\r\n\t\t\t# prepare method.\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Now we train the model\r\n\t\t\tfor epoch in range(lowercase__\t\t):\r\n\t\t\t\t\t\tmodel.train()\r\n\t\t\t\t\t\twith LocalSGD(\r\n\t\t\t\t\t\t accelerator=lowercase__\t\t\t\t,\t\t\t\t\tmodel=lowercase__\t\t\t\t,\t\t\t\t\tlocal_sgd_steps=lowercase__\t\t\t\t,\t\t\t\t\tenabled=local_sgd_steps is not None\t\t) as local_sgd:\r\n\t\t\t\t\t\t\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\t\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t# New code #\r\n\t\t\t\t\t\t\t\t\t\t\t\t# We use the new `accumulate` context manager to perform gradient accumulation\r\n\t\t\t\t\t\t\t\t\t\t\t\t# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.\r\n\t\t\t\t\t\t\t\t\t\t\t\twith accelerator.accumulate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= output.loss\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\taccelerator.backward(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlr_scheduler.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer.zero_grad()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# LocalSGD-specific line\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlocal_sgd.step()\r\n\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.logits.argmax(dim=-1\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accelerator.gather_for_metrics((predictions, batch[\"\"\"labels\"\"\"])\t\t)\r\n\t\t\t\t\t\t\t\t\tmetric.add_batch(\r\n\t\t\t\t\t\t\t\t\t predictions=lowercase__\t\t\t\t,\t\t\t\t\treferences=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= metric.compute()\r\n\t\t\t\t\t\t# Use accelerator.print to print only on the main process.\r\n\t\t\t\t\t\taccelerator.print(f\"\"\"epoch {epoch}:\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Tuple:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= argparse.ArgumentParser(description=\"\"\"Simple example of training script.\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--mixed_precision\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=lowercase__\t\t\t\t,\t\t\t\t\tchoices=[\"\"\"no\"\"\", \"\"\"fp16\"\"\", \"\"\"bf16\"\"\", \"\"\"fp8\"\"\"]\t\t\t\t,\t\t\t\t\thelp=\"\"\"Whether to use mixed precision. 
Choose\"\"\"\r\n\t\t\t \"\"\"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\"\"\r\n\t\t\t \"\"\"and an Nvidia Ampere GPU.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t# New Code #\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--gradient_accumulation_steps\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=1\t\t\t\t,\t\t\t\t\thelp=\"\"\"The number of minibatches to be ran before gradients are accumulated.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--local_sgd_steps\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=8\t\t\t\t,\t\t\t\t\thelp=\"\"\"Number of local SGD steps or None to disable local SGD\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\"\"\"--cpu\"\"\"\t\t\t\t,\t\t\t\t\taction=\"\"\"store_true\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"If passed, will train on the CPU.\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= parser.parse_args()\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= {\"\"\"lr\"\"\": 2E-5, \"\"\"num_epochs\"\"\": 3, \"\"\"seed\"\"\": 4_2, \"\"\"batch_size\"\"\": 1_6}\r\n\t\t\ttraining_function(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":616,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nimport re\r\nimport unicodedata\r\nfrom shutil import copyfile\r\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union\r\n\r\nimport sentencepiece as spm\r\n\r\nfrom ...tokenization_utils import PreTrainedTokenizer\r\nfrom ...utils import is_torch_available, logging\r\n\r\n\r\nif is_torch_available():\r\n\t\t\t\timport torch\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom transformers.pipelines.conversational import Conversation\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'vocab_file': 'spiece.model'}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'vocab_file': {\r\n 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',\r\n 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',\r\n 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',\r\n 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',\r\n 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',\r\n }\r\n}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'AI-Sweden/gpt-sw3-126m': 20_48,\r\n 'AI-Sweden/gpt-sw3-350m': 20_48,\r\n 'AI-Sweden/gpt-sw3-1.6b': 20_48,\r\n 'AI-Sweden/gpt-sw3-6.7b': 20_48,\r\n 'AI-Sweden/gpt-sw3-20b': 20_48,\r\n}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tVOCAB_FILES_NAMES\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tPRETRAINED_VOCAB_FILES_MAP\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t[\"input_ids\", \"attention_mask\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=False\t, __A=False\t, __A=False\t, __A=None\t, __A=None\t, __A=None\t, __A=None\t, __A = None\t, **__A\t, ) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= {} if 
sp_model_kwargs is None else sp_model_kwargs\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= kwargs.get(\"\"\"name_or_path\"\"\"\t)\r\n\t\t\t\t\t\tif name_or_path is None:\r\n\t\t\t\t\t\t\t\t\tlogger.warning(\r\n\t\t\t\t\t\t\t\t\t \"\"\"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,\"\"\"\r\n\t\t\t\t\t\t\t\t\t \"\"\" you are testing the model, this can safely be ignored\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"None\"\"\"\r\n\r\n\t\t\t\t\t\t# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"<|endoftext|>\"\"\" if eos_token is None else eos_token\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"\"\"\" if unk_token is None else unk_token\r\n\t\t\t\t\t\tif \"gpt-sw3-7b\" in name_or_path:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= unk_token if pad_token is None else pad_token\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= eos_token if bos_token is None else bos_token\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"\"\"\" if pad_token is None else pad_token\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"\"\"\" if bos_token is None else bos_token\r\n\r\n\t\t\t\t\t\tsuper().__init__(\r\n\t\t\t\t\t\t do_lower_case=__A\t, remove_space=__A\t, keep_accents=__A\t, bos_token=__A\t, eos_token=__A\t, unk_token=__A\t, pad_token=__A\t, sp_model_kwargs=self.sp_model_kwargs\t, **__A\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= do_lower_case\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= remove_space\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= keep_accents\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= vocab_file\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= spm.SentencePieceProcessor(**self.sp_model_kwargs\t)\r\n\t\t\t\t\t\tself.sp_model.Load(__A\t)\r\n\r\n\t\t\t\t\t\t# Used for whitespace normalization in input texts\r\n\t\t\t\t\t\t# fmt : off\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\"\"\" \"\"\", \"\"\"โ€‰\"\"\", \"\"\"โ€Š\"\"\", \"\"\"โ€ฏ\"\"\", \"\"\"โ€…\"\"\", \"\"\"ใ€€\"\"\", \"\"\"โ€‚\"\"\", \"\"\" \"\"\", \"\"\"โ€ˆ\"\"\", \"\"\"โ€ƒ\"\"\", \"\"\"๏ฟผ\"\"\", \"\"\"ย„\"\"\"}\r\n\t\t\t\t\t\t# fmt : on\r\n\r\n\t\t\t\t\t\t# Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= re.compile(\r\n\t\t\t\t\t\t f\"\"\"[{\"\".join(map(__A\t, list(range(0\t, 9\t)\t) + list(range(11\t, 32\t)\t) + list(range(127\t, 160\t)\t) + [160, 173, 8203]\t)\t)}]\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __getstate__( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.__dict__.copy()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= None\r\n\t\t\t\t\t\treturn state\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __setstate__( self\t, __A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= d\r\n\r\n\t\t\t\t\t\t# for backward compatibility\r\n\t\t\t\t\t\tif not hasattr(self\t, \"\"\"sp_model_kwargs\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= {}\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= spm.SentencePieceProcessor(**self.sp_model_kwargs\t)\r\n\t\t\t\t\t\tself.sp_model.Load(self.vocab_file\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\t# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn len(self.sp_model\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.non_printing_characters_re.sub(\"\"\"\"\"\"\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Normalize whitespaces\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"\"\"\".join([char if char not in self.whitespaces else \"\"\" \"\"\" for char in text]\t)\r\n\r\n\t\t\t\t\t\t# NFC Unicode normalization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= unicodedata.normalize(\"\"\"NFC\"\"\"\t, __A\t)\r\n\t\t\t\t\t\treturn text\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.preprocess_text(__A\t)\r\n\t\t\t\t\t\treturn self.sp_model.encode(__A\t, out_type=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self.sp_model.PieceToId(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\treturn self.sp_model.IdToPiece(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@staticmethod\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\treturn out_string\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= []\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= False\r\n\t\t\t\t\t\tfor token in tokens:\r\n\t\t\t\t\t\t\t\t\t# make sure that special tokens are not decoded using sentencepiece model\r\n\t\t\t\t\t\t\t\t\tif token in self.all_special_tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document\r\n\t\t\t\t\t\t\t\t\t\t\t\tif not prev_is_special:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tout_string += \" \"\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tout_string += self.sp_model.decode(__A\t) + token\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= True\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 
[]\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tcurrent_sub_tokens.append(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= False\r\n\t\t\t\t\t\tout_string += self.sp_model.decode(__A\t)\r\n\r\n\t\t\t\t\t\treturn out_string\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict[str, int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {self.convert_ids_to_tokens(__A\t): i for i in range(self.vocab_size\t)}\r\n\t\t\t\t\t\tvocab.update(self.added_tokens_encoder\t)\r\n\t\t\t\t\t\treturn vocab\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = None\t) ->\t\t\t\t\tTuple[str]:\r\n\t\t\t\t\t\tif not os.path.isdir(__A\t):\r\n\t\t\t\t\t\t\t\t\tlogger.error(f\"\"\"Vocabulary path ({save_directory}) should be a directory\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\treturn\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= os.path.join(\r\n\t\t\t\t\t\t __A\t, (filename_prefix + \"\"\"-\"\"\" if filename_prefix else \"\"\"\"\"\") + VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\r\n\t\t\t\t\t\tif os.path.abspath(self.vocab_file\t) != os.path.abspath(__A\t) and os.path.isfile(self.vocab_file\t):\r\n\t\t\t\t\t\t\t\t\tcopyfile(self.vocab_file\t, __A\t)\r\n\t\t\t\t\t\telif not os.path.isfile(self.vocab_file\t):\r\n\t\t\t\t\t\t\t\t\twith open(__A\t, \"\"\"wb\"\"\"\t) as fi:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.sp_model.serialized_model_proto()\r\n\t\t\t\t\t\t\t\t\t\t\t\tfi.write(__A\t)\r\n\r\n\t\t\t\t\t\treturn (out_vocab_file,)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = False\t) ->\t\t\t\t\tUnion[List[int], List[List[int]], \"torch.Tensor\"]:\r\n\r\n\t\t\t\t\t\tif isinstance(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.preprocess_text(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.sp_model.encode(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [self.preprocess_text(__A\t) for t in text]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.sp_model.encode(__A\t)\r\n\r\n\t\t\t\t\t\tif return_tensors is True or return_tensors == \"pt\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= torch.tensor(__A\t)\r\n\r\n\t\t\t\t\t\treturn token_ids\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\treturn self.sp_model.decode(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tList[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= [f\"\"\"User: {text}\"\"\" if is_user else f\"\"\"Bot: {text}\"\"\" for is_user, text in conversation.iter_texts()]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= (\r\n\t\t\t\t\t\t f\"\"\"{self.eos_token}{self.bos_token}\"\"\" + f\"\"\"{self.bos_token}\"\"\".join(__A\t) + f\"\"\"{self.bos_token}Bot:\"\"\"\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\treturn self.encode(text=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\nimport json\r\nimport os\r\n\r\nimport evaluate\r\nimport torch\r\nfrom datasets import load_dataset\r\nfrom torch.optim import AdamW\r\nfrom torch.utils.data import DataLoader\r\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\r\n\r\nfrom accelerate import 
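# ---- editor's aside (illustrative sketch, not part of the dataset record above) ----
# The tokenizer record above preprocesses text in three steps: strip
# non-printing control characters with a regex, map exotic Unicode whitespace
# to a plain space, then NFC-normalize. The same pipeline restated standalone;
# I reuse the record's character ranges but only a subset of its whitespace
# set, and the helper name is my own:
import re
import unicodedata

non_printing = re.compile(
    "[%s]"
    % "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))
)
whitespaces = {"\u2009", "\u200a", "\u202f", "\u2005", "\u3000", "\u2002", "\u2008", "\u2003"}

def preprocess(text):
    text = non_printing.sub("", text)  # drop control characters
    text = "".join(" " if ch in whitespaces else ch for ch in text)  # normalize spaces
    return unicodedata.normalize("NFC", text)  # canonical composition

assert preprocess("a\u2009b\x07c") == "a bc"
# ---- end editor's aside ----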
Accelerator, DistributedType\r\nfrom accelerate.utils.deepspeed import DummyOptim, DummyScheduler\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 16\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 32\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Accelerator\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 1_6\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str = \"bert-base-cased\"\t\t) -> Union[str, Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoTokenizer.from_pretrained(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= load_dataset(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tdef tokenize_function(lowercase__\t\t: List[str]\t\t):\r\n\t\t\t\t\t\t# max_length=None => use the model max length (it's actually the default)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer(examples[\"\"\"sentence1\"\"\"]\t\t\t\t,\t\t\t\t\texamples[\"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\ttruncation=lowercase__\t\t\t\t,\t\t\t\t\tmax_length=lowercase__\t\t)\r\n\t\t\t\t\t\treturn outputs\r\n\r\n\t\t\t# Apply the method we just defined to all the examples in all the splits of the dataset\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= datasets.map(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tbatched=lowercase__\t\t\t\t,\t\t\t\t\tremove_columns=[\"\"\"idx\"\"\", \"\"\"sentence1\"\"\", \"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\tload_from_cache_file=lowercase__\t\t)\r\n\r\n\t\t\t# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\r\n\t\t\t# transformers library\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenized_datasets.rename_column(\"\"\"label\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"labels\"\"\"\t\t)\r\n\r\n\t\t\tdef collate_fn(lowercase__\t\t: Union[str, Any]\t\t):\r\n\t\t\t\t\t\t# On TPU it's best to pad everything to the same length or training will be very slow.\r\n\t\t\t\t\t\tif accelerator.distributed_type == DistributedType.TPU:\r\n\t\t\t\t\t\t\t\t\treturn tokenizer.pad(lowercase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"max_length\"\"\"\t\t\t\t,\t\t\t\t\tmax_length=1_2_8\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t)\r\n\t\t\t\t\t\treturn tokenizer.pad(lowercase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"longest\"\"\"\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t)\r\n\r\n\t\t\t# Instantiate dataloaders.\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"train\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"validation\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\r\n\t\t\treturn train_dataloader, eval_dataloader\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Optional[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Union[str, Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> List[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tmodel.eval()\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 0\r\n\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] 
\t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.logits.argmax(dim=-1\t\t)\r\n\t\t\t\t\t\t# It is slightly faster to call this once, than multiple times\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= accelerator.gather(\r\n\t\t\t\t\t\t (predictions, batch[\"\"\"labels\"\"\"])\t\t) # If we are in a multiprocess environment, the last batch has duplicates\r\n\t\t\t\t\t\tif accelerator.use_distributed:\r\n\t\t\t\t\t\t\t\t\tif step == len(lowercase__\t\t) - 1:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= predictions[: len(eval_dataloader.dataset\t\t) - samples_seen]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= references[: len(eval_dataloader.dataset\t\t) - samples_seen]\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tsamples_seen += references.shape[0]\r\n\t\t\t\t\t\tmetric.add_batch(\r\n\t\t\t\t\t\t predictions=lowercase__\t\t\t\t,\t\t\t\t\treferences=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= metric.compute()\r\n\t\t\treturn eval_metric[\"accuracy\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t) -> Any:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= Accelerator()\r\n\r\n\t\t\t# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config[\"\"\"lr\"\"\"]\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"num_epochs\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= int(config[\"\"\"seed\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"batch_size\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= args.model_name_or_path\r\n\r\n\t\t\tset_seed(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Dict \t\t\t\t\t= get_dataloaders(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Instantiate the model (we build the model here so that the seed also control new weights initialization)\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= AutoModelForSequenceClassification.from_pretrained(lowercase__\t\t\t\t,\t\t\t\t\treturn_dict=lowercase__\t\t)\r\n\r\n\t\t\t# Instantiate optimizer\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= (\r\n\t\t\t AdamW\r\n\t\t\t if accelerator.state.deepspeed_plugin is None\r\n\t\t\t or \"\"\"optimizer\"\"\" not in accelerator.state.deepspeed_plugin.deepspeed_config\r\n\t\t\t else DummyOptim\r\n\t\t\t)\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= optimizer_cls(params=model.parameters()\t\t\t\t,\t\t\t\t\tlr=lowercase__\t\t)\r\n\r\n\t\t\tif accelerator.state.deepspeed_plugin is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accelerator.state.deepspeed_plugin.deepspeed_config[\r\n\t\t\t\t\t\t \"\"\"gradient_accumulation_steps\"\"\"\r\n\t\t\t\t\t\t]\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 1\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (len(lowercase__\t\t) * num_epochs) // gradient_accumulation_steps\r\n\r\n\t\t\t# Instantiate scheduler\r\n\t\t\tif (\r\n\t\t\t accelerator.state.deepspeed_plugin is None\r\n\t\t\t or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\r\n\t\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= get_linear_schedule_with_warmup(\r\n\t\t\t\t\t\t 
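# --- illustrative sketch (not part of the dataset row above) ---
# The evaluation loop above gathers predictions across processes and, on the
# last batch, trims the duplicates that distributed samplers add by padding.
# The trimming logic in isolation, as plain Python (values are toy examples):
def trim_gathered(gathered: list, dataset_len: int, samples_seen: int, is_last_batch: bool) -> list:
    if is_last_batch:
        # keep only the entries that belong to the real dataset
        return gathered[: dataset_len - samples_seen]
    return gathered

batch = [10, 11, 12, 10]  # final element is a padding duplicate
print(trim_gathered(batch, dataset_len=7, samples_seen=4, is_last_batch=True))  # [10, 11, 12]
# --- end sketch ---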
optimizer=lowercase__\t\t\t\t,\t\t\t\t\tnum_warmup_steps=0\t\t\t\t,\t\t\t\t\tnum_training_steps=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= DummyScheduler(lowercase__\t\t\t\t,\t\t\t\t\ttotal_num_steps=lowercase__\t\t\t\t,\t\t\t\t\twarmup_num_steps=0\t\t)\r\n\r\n\t\t\t# Prepare everything\r\n\t\t\t# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\r\n\t\t\t# prepare method.\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# We need to keep track of how many total steps we have iterated over\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\t# We also need to keep track of the stating epoch so files are named properly\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= evaluate.load(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= num_epochs\r\n\r\n\t\t\tif args.partial_train_epoch is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= args.partial_train_epoch\r\n\r\n\t\t\tif args.resume_from_checkpoint:\r\n\t\t\t\t\t\taccelerator.load_state(args.resume_from_checkpoint\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= args.resume_from_checkpoint.split(\"\"\"epoch_\"\"\"\t\t)[1]\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\tfor char in epoch_string:\r\n\t\t\t\t\t\t\t\t\tif char.isdigit():\r\n\t\t\t\t\t\t\t\t\t\t\t\tstate_epoch_num += char\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(lowercase__\t\t) + 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= evaluation_loop(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\taccelerator.print(\"\"\"resumed checkpoint performance:\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\taccelerator.print(\"\"\"resumed checkpoint's scheduler's lr:\"\"\"\t\t\t\t,\t\t\t\t\tlr_scheduler.get_lr()[0]\t\t)\r\n\t\t\t\t\t\taccelerator.print(\"\"\"resumed optimizers's lr:\"\"\"\t\t\t\t,\t\t\t\t\toptimizer.param_groups[0][\"\"\"lr\"\"\"]\t\t)\r\n\t\t\t\t\t\twith open(os.path.join(args.output_dir\t\t\t\t,\t\t\t\t\tf\"\"\"state_{starting_epoch-1}.json\"\"\"\t\t)\t\t\t\t,\t\t\t\t\t\"\"\"r\"\"\"\t\t) as f:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= json.load(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tassert resumed_state[\"accuracy\"] == accuracy, \"Accuracy mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\tassert (\r\n\t\t\t\t\t\t\t\t\t resumed_state[\"lr\"] == lr_scheduler.get_lr()[0]\r\n\t\t\t\t\t\t\t\t\t), \"Scheduler learning rate mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\tassert (\r\n\t\t\t\t\t\t\t\t\t resumed_state[\"optimizer_lr\"] == optimizer.param_groups[0][\"lr\"]\r\n\t\t\t\t\t\t\t\t\t), \"Optimizer learning rate mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\tassert resumed_state[\"epoch\"] == starting_epoch - 1, \"Epoch mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\treturn\r\n\r\n # Now we train the model\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 
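# --- illustrative sketch (not part of the dataset row above) ---
# The resume logic above recovers the epoch index by splitting the checkpoint
# path on "epoch_" and reading the leading digits. A compact equivalent,
# assuming the f"epoch_{n}" directory naming convention used in the script:
def starting_epoch_from_checkpoint(path: str) -> int:
    suffix = path.split("epoch_")[1]
    digits = ""
    for ch in suffix:
        if ch.isdigit():
            digits += ch
        else:
            break
    return int(digits) + 1  # training resumes at the *next* epoch

assert starting_epoch_from_checkpoint("out/epoch_3") == 4
assert starting_epoch_from_checkpoint("out/epoch_12_final") == 13
# --- end sketch ---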
{}\r\n\t\t\tfor epoch in range(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t):\r\n\t\t\t\t\t\tmodel.train()\r\n\t\t\t\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= outputs.loss\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= loss / gradient_accumulation_steps\r\n\t\t\t\t\t\t\t\t\taccelerator.backward(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tif step % gradient_accumulation_steps == 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\toptimizer.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\tlr_scheduler.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\toptimizer.zero_grad()\r\n\r\n\t\t\t\t\t\t\t\t\toverall_step += 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= f\"\"\"epoch_{epoch}\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= os.path.join(args.output_dir\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\taccelerator.save_state(lowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= evaluation_loop(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accuracy\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= lr_scheduler.get_lr()[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= optimizer.param_groups[0][\"\"\"lr\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= epoch\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= overall_step\r\n\t\t\t\t\t\taccelerator.print(f\"\"\"epoch {epoch}:\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t\t\t\taccelerator.wait_for_everyone()\r\n\t\t\t\t\t\tif accelerator.is_main_process:\r\n\t\t\t\t\t\t\t\t\twith open(os.path.join(args.output_dir\t\t\t\t,\t\t\t\t\tf\"\"\"state_{epoch}.json\"\"\"\t\t)\t\t\t\t,\t\t\t\t\t\"\"\"w\"\"\"\t\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tjson.dump(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= argparse.ArgumentParser(description=\"\"\"Simple example of training script tracking peak GPU memory usage.\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--model_name_or_path\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=\"\"\"bert-base-cased\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"Path to pretrained model or model identifier from huggingface.co/models.\"\"\"\t\t\t\t,\t\t\t\t\trequired=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--output_dir\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=\"\"\".\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"Optional save directory where all checkpoint folders will be stored. 
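# --- illustrative sketch (not part of the dataset row above) ---
# The training loop above implements manual gradient accumulation: the loss is
# divided by the accumulation factor and the optimizer only steps every
# `accum_steps` micro-batches. A toy PyTorch skeleton of the same pattern
# (model and data here are placeholders, not the script's real ones):
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accum_steps = 4

for step in range(16):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y) / accum_steps  # scale for accumulation
    loss.backward()  # gradients accumulate across micro-batches
    if step % accum_steps == 0:  # note: like the script, this also steps at step 0
        optimizer.step()
        optimizer.zero_grad()
# --- end sketch ---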
Default is the current working directory.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--resume_from_checkpoint\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=lowercase__\t\t\t\t,\t\t\t\t\thelp=\"\"\"If the training should continue from a checkpoint folder.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--partial_train_epoch\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=lowercase__\t\t\t\t,\t\t\t\t\thelp=\"\"\"If passed, the training will stop after this number of epochs.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--num_epochs\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=2\t\t\t\t,\t\t\t\t\thelp=\"\"\"Number of train epochs.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= parser.parse_args()\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\"\"\"lr\"\"\": 2E-5, \"\"\"num_epochs\"\"\": args.num_epochs, \"\"\"seed\"\"\": 4_2, \"\"\"batch_size\"\"\": 1_6}\r\n\r\n\t\t\ttraining_function(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":617,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom .configuration_bert_masked import MaskedBertConfig\r\nfrom .modeling_bert_masked import (\r\n MaskedBertForMultipleChoice,\r\n MaskedBertForQuestionAnswering,\r\n MaskedBertForSequenceClassification,\r\n MaskedBertForTokenClassification,\r\n MaskedBertModel,\r\n)\r\nfrom .modules import *\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport baseaa\r\nimport io\r\nimport json\r\nimport os\r\nfrom copy import deepcopy\r\n\r\nfrom ..optimizer import AcceleratedOptimizer\r\nfrom ..scheduler import AcceleratedScheduler\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tif isinstance(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\t# Don't modify user's data should they want to reuse it (e.g. in tests), because once we\r\n\t\t\t\t\t\t\t\t\t# modified it, it will not be accepted here again, since `auto` values would have been overridden\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= deepcopy(__A\t)\r\n\t\t\t\t\t\telif os.path.exists(__A\t):\r\n\t\t\t\t\t\t\t\t\twith io.open(__A\t, \"\"\"r\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= json.load(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= baseaa.urlsafe_baadecode(__A\t).decode(\"\"\"utf-8\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= json.loads(__A\t)\r\n\t\t\t\t\t\t\t\t\texcept (UnicodeDecodeError, AttributeError, ValueError):\r\n\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t\t\t\t f\"\"\"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. 
Received: {config_file_or_dict}\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= config\r\n\r\n\t\t\t\t\t\tself.set_stage_and_offload()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# zero stage - this is done as early as possible, before model is created, to allow\r\n\t\t\t\t\t\t# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object\r\n\t\t\t\t\t\t# during ``zero.Init()`` which needs to know the dtype, and some other hparams.\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_value(\"\"\"zero_optimization.stage\"\"\"\t, -1\t)\r\n\r\n\t\t\t\t\t\t# offload\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= False\r\n\t\t\t\t\t\tif self.is_zeroa() or self.is_zeroa():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= set([\"\"\"cpu\"\"\", \"\"\"nvme\"\"\"]\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= set(\r\n\t\t\t\t\t\t\t\t\t [\r\n\t\t\t\t\t\t\t\t\t self.get_value(\"\"\"zero_optimization.offload_optimizer.device\"\"\"\t),\r\n\t\t\t\t\t\t\t\t\t self.get_value(\"\"\"zero_optimization.offload_param.device\"\"\"\t),\r\n\t\t\t\t\t\t\t\t\t ]\t)\r\n\t\t\t\t\t\t\t\t\tif len(offload_devices & offload_devices_valid\t) > 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= True\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.config\r\n\r\n\t\t\t\t\t\t# find the config node of interest if it exists\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= ds_key_long.split(\"\"\".\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= nodes.pop()\r\n\t\t\t\t\t\tfor node in nodes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= config.get(__A\t)\r\n\t\t\t\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\treturn None, ds_key\r\n\r\n\t\t\t\t\t\treturn config, ds_key\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=None\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.find_config_node(__A\t)\r\n\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\treturn default\r\n\t\t\t\t\t\treturn config.get(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=False\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.config\r\n\r\n\t\t\t\t\t\t# find the config node of interest if it exists\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ds_key_long.split(\"\"\".\"\"\"\t)\r\n\t\t\t\t\t\tfor node in nodes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= config.get(__A\t)\r\n\t\t\t\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tif must_exist:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(f\"\"\"Can't find {ds_key_long} entry in the config: {self.config}\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn\r\n\r\n # if found remove it\r\n\t\t\t\t\t\tif parent_config is not None:\r\n\t\t\t\t\t\t\t\t\tparent_config.pop(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.get_value(__A\t)\r\n\t\t\t\t\t\treturn False if value is None else bool(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef 
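# --- illustrative sketch (not part of the dataset row above) ---
# The config helper above resolves dotted keys such as
# "zero_optimization.offload_param.device" by walking the nested dict one
# segment at a time. The same lookup as a standalone function:
def get_value(config: dict, ds_key_long: str, default=None):
    nodes = ds_key_long.split(".")
    leaf = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(leaf, default)

cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value(cfg, "zero_optimization.offload_param.device") == "cpu"
assert get_value(cfg, "zero_optimization.stage") == 3
assert get_value(cfg, "optimizer.type", default="adam") == "adam"
# --- end sketch ---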
\t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_value(__A\t)\r\n\t\t\t\t\t\treturn False if value is None else not bool(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\treturn self._stage == 2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn self._stage == 3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn self._offload\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= engine\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, **__A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# runs backpropagation and handles mixed precision\r\n\t\t\t\t\t\tself.engine.backward(__A\t, **__A\t)\r\n\r\n\t\t\t\t\t\t# Deepspeed's `engine.step` performs the following operations:\r\n\t\t\t\t\t\t# - gradient accumulation check\r\n\t\t\t\t\t\t# - gradient clipping\r\n\t\t\t\t\t\t# - optimizer step\r\n\t\t\t\t\t\t# - zero grad\r\n\t\t\t\t\t\t# - checking overflow\r\n\t\t\t\t\t\t# - lr_scheduler step (only if engine.lr_scheduler is not None)\r\n\t\t\t\t\t\tself.engine.step()\r\n\t\t\t\t\t\t# and this plugin overrides the above calls with no-ops when Accelerate runs under\r\n\t\t\t\t\t\t# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple\r\n\t\t\t\t\t\t# training loop that works transparently under many training regimes.\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tsuper().__init__(__A\t, device_placement=__A\t, scaler=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= hasattr(self.optimizer\t, \"\"\"overflow\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tif self.__has_overflow__:\r\n\t\t\t\t\t\t\t\t\treturn self.optimizer.overflow\r\n\t\t\t\t\t\treturn False\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tsuper().__init__(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. 
Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=0.0_0_1\t, __A=0\t, **__A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= params\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= lr\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= weight_decay\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= kwargs\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=None\t, __A=0\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= optimizer\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= total_num_steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= warmup_num_steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= kwargs\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":618,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport logging\r\nimport math\r\nimport os\r\nimport sys\r\nfrom dataclasses import dataclass, field\r\nfrom typing import Optional\r\n\r\nfrom datasets import Dataset, load_dataset\r\n\r\nimport transformers\r\nfrom transformers import (\r\n CONFIG_MAPPING,\r\n MODEL_FOR_MASKED_LM_MAPPING,\r\n AutoConfig,\r\n AutoModelForMaskedLM,\r\n AutoTokenizer,\r\n DataCollatorForWholeWordMask,\r\n HfArgumentParser,\r\n Trainer,\r\n TrainingArguments,\r\n set_seed,\r\n)\r\nfrom transformers.trainer_utils import get_last_checkpoint, is_main_process\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.getLogger(__name__)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= list(MODEL_FOR_MASKED_LM_MAPPING.keys())\r\n__UpperCAmelCase\t\t\t\t\t\t\t= tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass _SCREAMING_SNAKE_CASE :\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"If training from scratch, pass a model type from the list: \" + \", \".join(A__\t\t\t\t\t\t\t)} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"Override some existing default config settings when a model is trained from scratch. 
Example: \"\r\n\t\t\t \"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=\"main\" ,\t\t\tmetadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"Will use the token generated when running `huggingface-cli login` (necessary to use this script \"\r\n\t\t\t \"with private models).\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tif self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):\r\n\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t \"\"\"--config_overrides can't be used in combination with --config_name or --model_name_or_path\"\"\"\t)\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass _SCREAMING_SNAKE_CASE :\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(default=A__ ,\t\t\tmetadata={\"help\": \"The input training data file (a text file).\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"An optional input evaluation data file to evaluate the perplexity on (a text file).\"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"An optional input train ref data file for whole word masking in Chinese.\"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"An optional input validation ref data file for whole word masking in Chinese.\"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=5 ,\t\t\tmetadata={\r\n\t\t\t \"help\": \"The percentage of the train set used as validation set in case there's no validation split\"\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] 
\t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"The maximum total input sequence length after tokenization. Sequences longer \"\r\n\t\t\t \"than this will be truncated. Default to the max input length of the model.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"The number of processes to use for the preprocessing.\"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :float \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=0.1_5 ,\t\t\tmetadata={\"help\": \"Ratio of tokens to mask for masked language modeling loss\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"Whether to pad all samples to `max_seq_length`. \"\r\n\t\t\t \"If False, will pad the samples dynamically when batching to the maximum length in the batch.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tif self.train_file is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.train_file.split(\"\"\".\"\"\"\t)[-1]\r\n\t\t\t\t\t\t\t\t\tassert extension in [\"csv\", \"json\", \"txt\"], \"`train_file` should be a csv, a json or a txt file.\"\r\n\t\t\t\t\t\tif self.validation_file is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.validation_file.split(\"\"\".\"\"\"\t)[-1]\r\n\t\t\t\t\t\t\t\t\tassert extension in [\"csv\", \"json\", \"txt\"], \"`validation_file` should be a csv, a json or a txt file.\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Tuple\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Any\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\twith open(lowercase__\t\t\t\t,\t\t\t\t\t\"\"\"r\"\"\"\t\t\t\t,\t\t\t\t\tencoding=\"\"\"utf-8\"\"\"\t\t) as f:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [json.loads(lowercase__\t\t) for line in f.read().splitlines() if (len(lowercase__\t\t) > 0 and not line.isspace())]\r\n\t\t\tassert len(lowercase__\t\t) == len(lowercase__\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {c: dataset[c] for c in dataset.column_names}\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= refs\r\n\t\t\treturn Dataset.from_dict(lowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Dict:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)\t\t)\r\n\t\t\tif len(sys.argv\t\t) == 2 and sys.argv[1].endswith(\"\"\".json\"\"\"\t\t):\r\n\t\t\t\t\t\t# If we pass only one argument to the script and it's the path to a json file,\r\n\t\t\t\t\t\t# let's parse it to get our arguments.\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :int \t\t\t\t\t= parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]\t\t)\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :int \t\t\t\t\t= parser.parse_args_into_dataclasses()\r\n\r\n\t\t\t# Detecting last checkpoint.\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= None\r\n\t\t\tif os.path.isdir(training_args.output_dir\t\t) and training_args.do_train and not training_args.overwrite_output_dir:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 
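# --- illustrative sketch (not part of the dataset row above) ---
# The dataclasses above become CLI flags via HfArgumentParser, and a single
# trailing .json argument is parsed instead of the command line. A minimal
# version of that dispatch (the dataclass fields are illustrative only):
import sys
from dataclasses import dataclass, field
from typing import Optional
from transformers import HfArgumentParser

@dataclass
class MyArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file."})
    mlm_probability: float = field(default=0.15, metadata={"help": "Ratio of tokens to mask."})

parser = HfArgumentParser(MyArguments)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    (args,) = parser.parse_json_file(json_file=sys.argv[1])
else:
    (args,) = parser.parse_args_into_dataclasses()
print(args.mlm_probability)
# --- end sketch ---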
get_last_checkpoint(training_args.output_dir\t\t)\r\n\t\t\t\t\t\tif last_checkpoint is None and len(os.listdir(training_args.output_dir\t\t)\t\t) > 0:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t f\"\"\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\"\"\r\n\t\t\t\t\t\t\t\t\t \"\"\"Use --overwrite_output_dir to overcome.\"\"\"\t\t)\r\n\t\t\t\t\t\telif last_checkpoint is not None:\r\n\t\t\t\t\t\t\t\t\tlogger.info(\r\n\t\t\t\t\t\t\t\t\t f\"\"\"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change \"\"\"\r\n\t\t\t\t\t\t\t\t\t \"\"\"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\"\"\t\t)\r\n\r\n # Setup logging\r\n\t\t\tlogging.basicConfig(\r\n\t\t\t format=\"\"\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\"\"\t\t\t\t,\t\t\t\t\tdatefmt=\"\"\"%m/%d/%Y %H:%M:%S\"\"\"\t\t\t\t,\t\t\t\t\thandlers=[logging.StreamHandler(sys.stdout\t\t)]\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tlogger.setLevel(logging.INFO if is_main_process(training_args.local_rank\t\t) else logging.WARN\t\t)\r\n\r\n\t\t\t# Log on each process the small summary:\r\n\t\t\tlogger.warning(\r\n\t\t\t f\"\"\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\"\"\r\n\t\t\t + f\"\"\"distributed training: {bool(training_args.local_rank != -1\t\t)}, 16-bits training: {training_args.fpaa}\"\"\"\t\t)\r\n\t\t\t# Set the verbosity to info of the Transformers logger (on main process only):\r\n\t\t\tif is_main_process(training_args.local_rank\t\t):\r\n\t\t\t\t\t\ttransformers.utils.logging.set_verbosity_info()\r\n\t\t\t\t\t\ttransformers.utils.logging.enable_default_handler()\r\n\t\t\t\t\t\ttransformers.utils.logging.enable_explicit_format()\r\n\t\t\tlogger.info(\"\"\"Training/evaluation parameters %s\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Set seed before initializing model.\r\n\t\t\tset_seed(training_args.seed\t\t)\r\n\r\n\t\t\t# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\r\n\t\t\t# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\r\n\t\t\t# (the dataset will be downloaded automatically from the datasets Hub).\r\n\t\t\t#\r\n\t\t\t# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\r\n\t\t\t# 'text' is found. 
You can easily tweak this behavior (see below).\r\n\t\t\t#\r\n\t\t\t# In distributed training, the load_dataset function guarantee that only one local process can concurrently\r\n\t\t\t# download the dataset.\r\n\t\t\tif data_args.dataset_name is not None:\r\n\t\t\t\t\t\t# Downloading and loading a dataset from the hub.\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= load_dataset(data_args.dataset_name\t\t\t\t,\t\t\t\t\tdata_args.dataset_config_name\t\t)\r\n\t\t\t\t\t\tif \"validation\" not in datasets.keys():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= load_dataset(\r\n\t\t\t\t\t\t\t\t\t data_args.dataset_name\t\t\t\t,\t\t\t\t\tdata_args.dataset_config_name\t\t\t\t,\t\t\t\t\tsplit=f\"\"\"train[:{data_args.validation_split_percentage}%]\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= load_dataset(\r\n\t\t\t\t\t\t\t\t\t data_args.dataset_name\t\t\t\t,\t\t\t\t\tdata_args.dataset_config_name\t\t\t\t,\t\t\t\t\tsplit=f\"\"\"train[{data_args.validation_split_percentage}%:]\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= {}\r\n\t\t\t\t\t\tif data_args.train_file is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= data_args.train_file\r\n\t\t\t\t\t\tif data_args.validation_file is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= data_args.validation_file\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= data_args.train_file.split(\"\"\".\"\"\"\t\t)[-1]\r\n\t\t\t\t\t\tif extension == \"txt\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"text\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= load_dataset(lowercase__\t\t\t\t,\t\t\t\t\tdata_files=lowercase__\t\t)\r\n\t\t\t# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\r\n\t\t\t# https://huggingface.co/docs/datasets/loading_datasets.html.\r\n\r\n\t\t\t# Load pretrained model and tokenizer\r\n\t\t\t#\r\n\t\t\t# Distributed training:\r\n\t\t\t# The .from_pretrained methods guarantee that only one local process can concurrently\r\n\t\t\t# download model & vocab.\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\r\n\t\t\t \"\"\"cache_dir\"\"\": model_args.cache_dir,\r\n\t\t\t \"\"\"revision\"\"\": model_args.model_revision,\r\n\t\t\t \"\"\"use_auth_token\"\"\": True if model_args.use_auth_token else None,\r\n\t\t\t}\r\n\t\t\tif model_args.config_name:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= AutoConfig.from_pretrained(model_args.config_name\t\t\t\t,\t\t\t\t\t**lowercase__\t\t)\r\n\t\t\telif model_args.model_name_or_path:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(model_args.model_name_or_path\t\t\t\t,\t\t\t\t\t**lowercase__\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= CONFIG_MAPPING[model_args.model_type]()\r\n\t\t\t\t\t\tlogger.warning(\"\"\"You are instantiating a new config instance from scratch.\"\"\"\t\t)\r\n\t\t\t\t\t\tif model_args.config_overrides is not None:\r\n\t\t\t\t\t\t\t\t\tlogger.info(f\"\"\"Overriding config: {model_args.config_overrides}\"\"\"\t\t)\r\n\t\t\t\t\t\t\t\t\tconfig.update_from_string(model_args.config_overrides\t\t)\r\n\t\t\t\t\t\t\t\t\tlogger.info(f\"\"\"New config: {config}\"\"\"\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\r\n\t\t\t \"\"\"cache_dir\"\"\": model_args.cache_dir,\r\n\t\t\t \"\"\"use_fast\"\"\": model_args.use_fast_tokenizer,\r\n\t\t\t \"\"\"revision\"\"\": model_args.model_revision,\r\n\t\t\t 
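# --- illustrative sketch (not part of the dataset row above) ---
# When a hub dataset has no validation split, the script above carves one out
# of the train split with the datasets library's slicing syntax. The pattern in
# isolation (dataset name and percentage are examples; requires network access):
from datasets import load_dataset

pct = 5  # stands in for data_args.validation_split_percentage
val = load_dataset("glue", "mrpc", split=f"train[:{pct}%]")
train = load_dataset("glue", "mrpc", split=f"train[{pct}%:]")
print(len(val), len(train))
# --- end sketch ---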
\"\"\"use_auth_token\"\"\": True if model_args.use_auth_token else None,\r\n\t\t\t}\r\n\t\t\tif model_args.tokenizer_name:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= AutoTokenizer.from_pretrained(model_args.tokenizer_name\t\t\t\t,\t\t\t\t\t**lowercase__\t\t)\r\n\t\t\telif model_args.model_name_or_path:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoTokenizer.from_pretrained(model_args.model_name_or_path\t\t\t\t,\t\t\t\t\t**lowercase__\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t \"\"\"You are instantiating a new tokenizer from scratch. This is not supported by this script.\"\"\"\r\n\t\t\t\t\t\t \"\"\"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\"\"\t\t)\r\n\r\n\t\t\tif model_args.model_name_or_path:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= AutoModelForMaskedLM.from_pretrained(\r\n\t\t\t\t\t\t model_args.model_name_or_path\t\t\t\t,\t\t\t\t\tfrom_tf=bool(\"\"\".ckpt\"\"\" in model_args.model_name_or_path\t\t)\t\t\t\t,\t\t\t\t\tconfig=lowercase__\t\t\t\t,\t\t\t\t\tcache_dir=model_args.cache_dir\t\t\t\t,\t\t\t\t\trevision=model_args.model_revision\t\t\t\t,\t\t\t\t\tuse_auth_token=True if model_args.use_auth_token else None\t\t\t\t,\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlogger.info(\"\"\"Training new model from scratch\"\"\"\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= AutoModelForMaskedLM.from_config(lowercase__\t\t)\r\n\r\n\t\t\tmodel.resize_token_embeddings(len(lowercase__\t\t)\t\t)\r\n\r\n\t\t\t# Preprocessing the datasets.\r\n\t\t\t# First we tokenize all the texts.\r\n\t\t\tif training_args.do_train:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= datasets[\"\"\"train\"\"\"].column_names\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= datasets[\"\"\"validation\"\"\"].column_names\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"text\"\"\" if \"\"\"text\"\"\" in column_names else column_names[0]\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"max_length\"\"\" if data_args.pad_to_max_length else False\r\n\r\n\t\t\tdef tokenize_function(lowercase__\t\t: Optional[Any]\t\t):\r\n\t\t\t\t\t\t# Remove empty lines\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [line for line in examples[\"\"\"text\"\"\"] if len(lowercase__\t\t) > 0 and not line.isspace()]\r\n\t\t\t\t\t\treturn tokenizer(examples[\"\"\"text\"\"\"]\t\t\t\t,\t\t\t\t\tpadding=lowercase__\t\t\t\t,\t\t\t\t\ttruncation=lowercase__\t\t\t\t,\t\t\t\t\tmax_length=data_args.max_seq_length\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= datasets.map(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tbatched=lowercase__\t\t\t\t,\t\t\t\t\tnum_proc=data_args.preprocessing_num_workers\t\t\t\t,\t\t\t\t\tremove_columns=[text_column_name]\t\t\t\t,\t\t\t\t\tload_from_cache_file=not data_args.overwrite_cache\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Add the chinese references if provided\r\n\t\t\tif data_args.train_ref_file is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= add_chinese_references(tokenized_datasets[\"\"\"train\"\"\"]\t\t\t\t,\t\t\t\t\tdata_args.train_ref_file\t\t)\r\n\t\t\tif data_args.validation_ref_file is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= add_chinese_references(\r\n\t\t\t\t\t\t tokenized_datasets[\"\"\"validation\"\"\"]\t\t\t\t,\t\t\t\t\tdata_args.validation_ref_file\t\t)\r\n\t\t\t# If we have ref files, need to avoid it removed by trainer\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= data_args.train_ref_file or 
data_args.validation_ref_file\r\n\t\t\tif has_ref:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= False\r\n\r\n\t\t\t# Data collator\r\n\t\t\t# This one will take care of randomly masking the tokens.\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= DataCollatorForWholeWordMask(tokenizer=lowercase__\t\t\t\t,\t\t\t\t\tmlm_probability=data_args.mlm_probability\t\t)\r\n\r\n\t\t\t# Initialize our Trainer\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= Trainer(\r\n\t\t\t model=lowercase__\t\t\t\t,\t\t\t\t\targs=lowercase__\t\t\t\t,\t\t\t\t\ttrain_dataset=tokenized_datasets[\"\"\"train\"\"\"] if training_args.do_train else None\t\t\t\t,\t\t\t\t\teval_dataset=tokenized_datasets[\"\"\"validation\"\"\"] if training_args.do_eval else None\t\t\t\t,\t\t\t\t\ttokenizer=lowercase__\t\t\t\t,\t\t\t\t\tdata_collator=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Training\r\n\t\t\tif training_args.do_train:\r\n\t\t\t\t\t\tif last_checkpoint is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= last_checkpoint\r\n\t\t\t\t\t\telif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path\t\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= model_args.model_name_or_path\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= trainer.train(resume_from_checkpoint=lowercase__\t\t)\r\n\t\t\t\t\t\ttrainer.save_model() # Saves the tokenizer too for easy upload\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= os.path.join(training_args.output_dir\t\t\t\t,\t\t\t\t\t\"\"\"train_results.txt\"\"\"\t\t)\r\n\t\t\t\t\t\tif trainer.is_world_process_zero():\r\n\t\t\t\t\t\t\t\t\twith open(lowercase__\t\t\t\t,\t\t\t\t\t\"\"\"w\"\"\"\t\t) as writer:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"***** Train results *****\"\"\"\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tfor key, value in sorted(train_result.metrics.items()\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(f\"\"\" {key} = {value}\"\"\"\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twriter.write(f\"\"\"{key} = {value}\\n\"\"\"\t\t)\r\n\r\n # Need to save the state, since Trainer.save_model saves only the tokenizer with the model\r\n\t\t\t\t\t\t\t\t\ttrainer.state.save_to_json(os.path.join(training_args.output_dir\t\t\t\t,\t\t\t\t\t\"\"\"trainer_state.json\"\"\"\t\t)\t\t)\r\n\r\n # Evaluation\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {}\r\n\t\t\tif training_args.do_eval:\r\n\t\t\t\t\t\tlogger.info(\"\"\"*** Evaluate ***\"\"\"\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= trainer.evaluate()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= math.exp(eval_output[\"\"\"eval_loss\"\"\"]\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= perplexity\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= os.path.join(training_args.output_dir\t\t\t\t,\t\t\t\t\t\"\"\"eval_results_mlm_wwm.txt\"\"\"\t\t)\r\n\t\t\t\t\t\tif trainer.is_world_process_zero():\r\n\t\t\t\t\t\t\t\t\twith open(lowercase__\t\t\t\t,\t\t\t\t\t\"\"\"w\"\"\"\t\t) as writer:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"***** Eval results *****\"\"\"\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tfor key, value in sorted(results.items()\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(f\"\"\" {key} = {value}\"\"\"\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twriter.write(f\"\"\"{key} = {value}\\n\"\"\"\t\t)\r\n\r\n\t\t\treturn results\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple 
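# --- illustrative sketch (not part of the dataset row above) ---
# The evaluation block above reports perplexity as exp(eval_loss), i.e. the
# exponential of the mean per-token cross-entropy. A two-line reminder of the
# relationship (the loss value is hypothetical):
import math

eval_loss = 2.079  # mean cross-entropy in nats
perplexity = math.exp(eval_loss)
print(f"perplexity = {perplexity:.2f}")  # ~8.0: as uncertain as a uniform 8-way choice
# --- end sketch ---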
docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tmain()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Copyright 2023 The HuggingFace Inc. team. All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom ..models.clipseg import CLIPSegForImageSegmentation\r\nfrom ..utils import is_vision_available, requires_backends\r\nfrom .base import PipelineTool\r\n\r\n\r\nif is_vision_available():\r\n\t\t\t\tfrom PIL import Image\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t \"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.\"\r\n\t\t\t \"It takes two arguments named `image` which should be the original image, and `label` which should be a text \"\r\n\t\t\t \"describing the elements what should be identified in the segmentation mask. The tool returns the mask.\"\r\n\t\t\t)\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t\"CIDAS/clipseg-rd64-refined\"\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\t\"image_segmenter\"\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tCLIPSegForImageSegmentation\r\n\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t[\"image\", \"text\"]\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t[\"image\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, *__A\t, **__A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\trequires_backends(self\t, [\"\"\"vision\"\"\"]\t)\r\n\t\t\t\t\t\tsuper().__init__(*__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn self.pre_processor(text=[label]\t, images=[image]\t, padding=__A\t, return_tensors=\"\"\"pt\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.model(**__A\t).logits\r\n\t\t\t\t\t\treturn logits\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.cpu().detach().numpy()\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 1\r\n\t\t\t\t\t\treturn Image.fromarray((array * 255).astype(np.uinta\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":619,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport tempfile\r\n\r\nimport torch\r\n\r\nfrom diffusers import (\r\n DEISMultistepScheduler,\r\n DPMSolverMultistepScheduler,\r\n 
DPMSolverSinglestepScheduler,\r\n UniPCMultistepScheduler,\r\n)\r\n\r\nfrom .test_schedulers import SchedulerCommonTest\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\t(UniPCMultistepScheduler,)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t((\"num_inference_steps\", 25),)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"num_train_timesteps\"\"\": 1000,\r\n\t\t\t\t\t\t \"\"\"beta_start\"\"\": 0.0_0_0_1,\r\n\t\t\t\t\t\t \"\"\"beta_end\"\"\": 0.0_2,\r\n\t\t\t\t\t\t \"\"\"beta_schedule\"\"\": \"\"\"linear\"\"\",\r\n\t\t\t\t\t\t \"\"\"solver_order\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"solver_type\"\"\": \"\"\"bh2\"\"\",\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\tconfig.update(**__A\t)\r\n\t\t\t\t\t\treturn config\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=0\t, **__A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= dict(self.forward_default_kwargs\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= kwargs.pop(\"\"\"num_inference_steps\"\"\"\t, __A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.dummy_sample\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0.1 * sample\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [residual + 0.2, residual + 0.1_5, residual + 0.1_0]\r\n\r\n\t\t\t\t\t\tfor scheduler_class in self.scheduler_classes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_scheduler_config(**__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= scheduler_class(**__A\t)\r\n\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(__A\t)\r\n\t\t\t\t\t\t\t\t\t# copy over dummy past residuals\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= dummy_past_residuals[: scheduler.config.solver_order]\r\n\r\n\t\t\t\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdirname:\r\n\t\t\t\t\t\t\t\t\t\t\t\tscheduler.save_config(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= scheduler_class.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tnew_scheduler.set_timesteps(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t# copy over dummy past residuals\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= dummy_past_residuals[: new_scheduler.config.solver_order]\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= sample, sample\r\n\t\t\t\t\t\t\t\t\tfor t in range(__A\t, time_step + scheduler.config.solver_order + 1\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= scheduler.step(__A\t, __A\t, __A\t, **__A\t).prev_sample\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= new_scheduler.step(__A\t, __A\t, __A\t, **__A\t).prev_sample\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tassert torch.sum(torch.abs(output - new_output\t)\t) < 1E-5, \"Scheduler outputs are not identical\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=0\t, **__A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= dict(self.forward_default_kwargs\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= kwargs.pop(\"\"\"num_inference_steps\"\"\"\t, __A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.dummy_sample\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0.1 * sample\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [residual + 0.2, residual + 
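# --- illustrative sketch (not part of the dataset row above) ---
# The test above checks that a scheduler reloaded from its saved config
# produces identical outputs. The save/load round trip, condensed to the
# invariant it protects (step count is arbitrary here):
import tempfile
import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler()
scheduler.set_timesteps(10)
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = UniPCMultistepScheduler.from_pretrained(tmpdir)
reloaded.set_timesteps(10)
assert torch.equal(scheduler.timesteps, reloaded.timesteps)
# --- end sketch ---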
0.1_5, residual + 0.1_0]\r\n\r\n\t\t\t\t\t\tfor scheduler_class in self.scheduler_classes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_scheduler_config()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= scheduler_class(**__A\t)\r\n\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# copy over dummy past residuals (must be after setting timesteps)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= dummy_past_residuals[: scheduler.config.solver_order]\r\n\r\n\t\t\t\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdirname:\r\n\t\t\t\t\t\t\t\t\t\t\t\tscheduler.save_config(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scheduler_class.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t# copy over dummy past residuals\r\n\t\t\t\t\t\t\t\t\t\t\t\tnew_scheduler.set_timesteps(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t# copy over dummy past residual (must be after setting timesteps)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= dummy_past_residuals[: new_scheduler.config.solver_order]\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= scheduler.step(__A\t, __A\t, __A\t, **__A\t).prev_sample\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= new_scheduler.step(__A\t, __A\t, __A\t, **__A\t).prev_sample\r\n\r\n\t\t\t\t\t\t\t\t\tassert torch.sum(torch.abs(output - new_output\t)\t) < 1E-5, \"Scheduler outputs are not identical\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t, **__A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tif scheduler is None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.scheduler_classes[0]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_scheduler_config(**__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= scheduler_class(**__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.scheduler_classes[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_scheduler_config(**__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= scheduler_class(**__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 10\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.dummy_model()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.dummy_sample_deter\r\n\t\t\t\t\t\tscheduler.set_timesteps(__A\t)\r\n\r\n\t\t\t\t\t\tfor i, t in enumerate(scheduler.timesteps\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= model(__A\t, __A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= scheduler.step(__A\t, __A\t, __A\t).prev_sample\r\n\r\n\t\t\t\t\t\treturn sample\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= dict(self.forward_default_kwargs\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= kwargs.pop(\"\"\"num_inference_steps\"\"\"\t, __A\t)\r\n\r\n\t\t\t\t\t\tfor scheduler_class in self.scheduler_classes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_scheduler_config()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scheduler_class(**__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.dummy_sample\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0.1 * sample\r\n\r\n\t\t\t\t\t\t\t\t\tif num_inference_steps is not None and hasattr(__A\t, \"\"\"set_timesteps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(__A\t)\r\n\t\t\t\t\t\t\t\t\telif 
num_inference_steps is not None and not hasattr(__A\t, \"\"\"set_timesteps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= num_inference_steps\r\n\r\n\t\t\t\t\t\t\t\t\t# copy over dummy past residuals (must be done after set_timesteps)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [residual + 0.2, residual + 0.1_5, residual + 0.1_0]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= dummy_past_residuals[: scheduler.config.solver_order]\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= scheduler.timesteps[5]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= scheduler.timesteps[6]\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= scheduler.step(__A\t, __A\t, __A\t, **__A\t).prev_sample\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scheduler.step(__A\t, __A\t, __A\t, **__A\t).prev_sample\r\n\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(output_a.shape\t, sample.shape\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(output_a.shape\t, output_a.shape\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# make sure that iterating over schedulers with same config names gives same results\r\n\t\t\t\t\t\t# for defaults\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= UniPCMultistepScheduler(**self.get_scheduler_config()\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.full_loop(scheduler=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= torch.mean(torch.abs(__A\t)\t)\r\n\r\n\t\t\t\t\t\tassert abs(result_mean.item() - 0.2_4_6_4\t) < 1E-3\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= DPMSolverSinglestepScheduler.from_config(scheduler.config\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= DEISMultistepScheduler.from_config(scheduler.config\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= DPMSolverMultistepScheduler.from_config(scheduler.config\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= UniPCMultistepScheduler.from_config(scheduler.config\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.full_loop(scheduler=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.mean(torch.abs(__A\t)\t)\r\n\r\n\t\t\t\t\t\tassert abs(result_mean.item() - 0.2_4_6_4\t) < 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tfor timesteps in [25, 50, 100, 999, 1000]:\r\n\t\t\t\t\t\t\t\t\tself.check_over_configs(num_train_timesteps=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tself.check_over_configs(thresholding=__A\t)\r\n\t\t\t\t\t\tfor order in [1, 2, 3]:\r\n\t\t\t\t\t\t\t\t\tfor solver_type in [\"bh1\", \"bh2\"]:\r\n\t\t\t\t\t\t\t\t\t\t\t\tfor threshold in [0.5, 1.0, 2.0]:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor prediction_type in [\"epsilon\", \"sample\"]:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.check_over_configs(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t thresholding=__A\t, prediction_type=__A\t, sample_max_value=__A\t, solver_order=__A\t, solver_type=__A\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tfor prediction_type in [\"epsilon\", \"v_prediction\"]:\r\n\t\t\t\t\t\t\t\t\tself.check_over_configs(prediction_type=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tfor solver_type in [\"bh1\", \"bh2\"]:\r\n\t\t\t\t\t\t\t\t\tfor 
order in [1, 2, 3]:\r\n\t\t\t\t\t\t\t\t\t\t\t\tfor prediction_type in [\"epsilon\", \"sample\"]:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.check_over_configs(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t solver_order=__A\t, solver_type=__A\t, prediction_type=__A\t, )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.full_loop(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t solver_order=__A\t, solver_type=__A\t, prediction_type=__A\t, )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tassert not torch.isnan(__A\t).any(), \"Samples have nan numbers\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tself.check_over_configs(lower_order_final=__A\t)\r\n\t\t\t\t\t\tself.check_over_configs(lower_order_final=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tfor num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:\r\n\t\t\t\t\t\t\t\t\tself.check_over_forward(num_inference_steps=__A\t, time_step=0\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.full_loop()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= torch.mean(torch.abs(__A\t)\t)\r\n\r\n\t\t\t\t\t\tassert abs(result_mean.item() - 0.2_4_6_4\t) < 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.full_loop(prediction_type=\"\"\"v_prediction\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= torch.mean(torch.abs(__A\t)\t)\r\n\r\n\t\t\t\t\t\tassert abs(result_mean.item() - 0.1_0_1_4\t) < 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.scheduler_classes[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_scheduler_config(thresholding=__A\t, dynamic_thresholding_ratio=0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scheduler_class(**__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 10\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.dummy_model()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.dummy_sample_deter.half()\r\n\t\t\t\t\t\tscheduler.set_timesteps(__A\t)\r\n\r\n\t\t\t\t\t\tfor i, t in enumerate(scheduler.timesteps\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= model(__A\t, __A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scheduler.step(__A\t, __A\t, __A\t).prev_sample\r\n\r\n\t\t\t\t\t\tassert sample.dtype == torch.floataa\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tfor scheduler_class in self.scheduler_classes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.get_scheduler_config(**__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scheduler_class(**__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(scheduler.config.num_train_timesteps\t)\r\n\t\t\t\t\t\t\t\t\tassert len(scheduler.timesteps.unique()\t) == scheduler.num_inference_steps\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: 
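int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> int:\r\n\t\t\t# NOTE (added): naive 0/1-knapsack recursion. The item at `index` is either skipped, or taken when its weight still fits in the remaining capacity; the result is the max of the two branches, so runtime is O(2^n) with O(n) recursion depth. Per the body below, the (obfuscated) parameters are weights, values, number_of_items, max_weight, index.\r\n\t\t\t# The original (obfuscated) signature resumes: ( ...\t, lowercase__\t\t: 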
int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif index == number_of_items:\r\n\t\t\t\t\t\treturn 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= knapsack(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tindex + 1\t\t)\r\n\t\t\tif weights[index] <= max_weight:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= values[index] + knapsack(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tmax_weight - weights[index]\t\t\t\t,\t\t\t\t\tindex + 1\t\t)\r\n\t\t\treturn max(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":620,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom ...processing_utils import ProcessorMixin\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\t[\"image_processor\", \"feature_extractor\"]\r\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\t\"TvltImageProcessor\"\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\t\"TvltFeatureExtractor\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tsuper().__init__(image_processor=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= image_processor\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= feature_extractor\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __call__( self\t, __A=None\t, __A=None\t, __A=None\t, __A=None\t, __A=False\t, __A=False\t, *__A\t, **__A\t, ) ->\t\t\t\t\tOptional[int]:\r\n\r\n\t\t\t\t\t\tif images is None and audio is None:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"You need to specify either an `images` or `audio` input to process.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= None\r\n\t\t\t\t\t\tif images is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.image_processor(__A\t, mask_pixel=__A\t, *__A\t, **__A\t)\r\n\t\t\t\t\t\tif images_mixed is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.image_processor(__A\t, is_mixed=__A\t, *__A\t, **__A\t)\r\n\t\t\t\t\t\tif audio is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.feature_extractor(\r\n\t\t\t\t\t\t\t\t\t __A\t, *__A\t, sampling_rate=__A\t, mask_audio=__A\t, **__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= {}\r\n\t\t\t\t\t\tif audio is not None:\r\n\t\t\t\t\t\t\t\t\toutput_dict.update(__A\t)\r\n\t\t\t\t\t\tif images is not None:\r\n\t\t\t\t\t\t\t\t\toutput_dict.update(__A\t)\r\n\t\t\t\t\t\tif images_mixed_dict is not None:\r\n\t\t\t\t\t\t\t\t\toutput_dict.update(__A\t)\r\n\t\t\t\t\t\treturn output_dict\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.image_processor.model_input_names\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.feature_extractor.model_input_names\r\n\t\t\t\t\t\treturn list(dict.fromkeys(image_processor_input_names + 
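feature_extractor_input_names\t)\t)\r\n\r\n\t\t\t\t\t\t# NOTE (added): a minimal usage sketch with hypothetical variable names (video_frames, waveform are placeholders):\r\n\t\t\t\t\t\t#   processor = TvltProcessor(image_processor, feature_extractor)\r\n\t\t\t\t\t\t#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)\r\n\t\t\t\t\t\t# At least one of images/audio must be given; the per-modality feature dicts are merged into one model-input dict.\r\n\t\t\t\t\t\t# The property above deduplicates while preserving order: list(dict.fromkeys(image_processor_input_names + 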
feature_extractor_input_names\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom .imports import is_tqdm_available\r\n\r\n\r\nif is_tqdm_available():\r\n\t\t\t\tfrom tqdm.auto import tqdm as _tqdm\r\n\r\nfrom ..state import PartialState\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: bool = True\t\t\t\t,\t\t\t\t\t*lowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\t**lowercase__\t\t: str\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not is_tqdm_available():\r\n\t\t\t\t\t\traise ImportError(\"\"\"Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= False\r\n\t\t\tif main_process_only:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= PartialState().local_process_index == 0\r\n\t\t\treturn _tqdm(*lowercase__\t\t\t\t,\t\t\t\t\t**lowercase__\t\t\t\t,\t\t\t\t\tdisable=lowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":621,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nimport unittest\r\n\r\nfrom transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer\r\nfrom transformers.testing_utils import slow\r\n\r\nfrom ...test_tokenization_common import TokenizerTesterMixin\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tBioGptTokenizer\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tFalse\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tsuper().setUp()\r\n\r\n\t\t\t\t\t\t# Adapted from Sennrich et al. 
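2015 (Byte-Pair Encoding).\r\n\t\t\t\t\t\t# NOTE (added): the tiny fixture below encodes one merge chain — with merges \"l o\", \"lo w\" and \"e r</w>\", the word \"lower\" segments into \"low\" + \"er</w>\", which is exactly what the full-tokenizer test asserts.\r\n\t\t\t\t\t\t# Adapted from Sennrich et al. 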
2015 and https://github.com/rsennrich/subword-nmt\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= [\r\n\t\t\t\t\t\t \"\"\"l\"\"\",\r\n\t\t\t\t\t\t \"\"\"o\"\"\",\r\n\t\t\t\t\t\t \"\"\"w\"\"\",\r\n\t\t\t\t\t\t \"\"\"e\"\"\",\r\n\t\t\t\t\t\t \"\"\"r\"\"\",\r\n\t\t\t\t\t\t \"\"\"s\"\"\",\r\n\t\t\t\t\t\t \"\"\"t\"\"\",\r\n\t\t\t\t\t\t \"\"\"i\"\"\",\r\n\t\t\t\t\t\t \"\"\"d\"\"\",\r\n\t\t\t\t\t\t \"\"\"n\"\"\",\r\n\t\t\t\t\t\t \"\"\"w</w>\"\"\",\r\n\t\t\t\t\t\t \"\"\"r</w>\"\"\",\r\n\t\t\t\t\t\t \"\"\"t</w>\"\"\",\r\n\t\t\t\t\t\t \"\"\"lo\"\"\",\r\n\t\t\t\t\t\t \"\"\"low\"\"\",\r\n\t\t\t\t\t\t \"\"\"er</w>\"\"\",\r\n\t\t\t\t\t\t \"\"\"low</w>\"\"\",\r\n\t\t\t\t\t\t \"\"\"lowest</w>\"\"\",\r\n\t\t\t\t\t\t \"\"\"newer</w>\"\"\",\r\n\t\t\t\t\t\t \"\"\"wider</w>\"\"\",\r\n\t\t\t\t\t\t \"\"\"<unk>\"\"\",\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= dict(zip(__A\t, range(len(__A\t)\t)\t)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= [\"\"\"l o 123\"\"\", \"\"\"lo w 1456\"\"\", \"\"\"e r</w> 1789\"\"\", \"\"\"\"\"\"]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"merges_file\"\"\"]\t)\r\n\t\t\t\t\t\twith open(self.vocab_file\t, \"\"\"w\"\"\"\t) as fp:\r\n\t\t\t\t\t\t\t\t\tfp.write(json.dumps(__A\t)\t)\r\n\t\t\t\t\t\twith open(self.merges_file\t, \"\"\"w\"\"\"\t) as fp:\r\n\t\t\t\t\t\t\t\t\tfp.write(\"\"\"\\n\"\"\".join(__A\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"lower newer\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"lower newer\"\"\"\r\n\t\t\t\t\t\treturn input_text, output_text\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= BioGptTokenizer(self.vocab_file\t, self.merges_file\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"lower\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [\"\"\"low\"\"\", \"\"\"er</w>\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer.tokenize(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokens + [\"\"\"<unk>\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [14, 15, 20]\r\n\t\t\t\t\t\tself.assertListEqual(tokenizer.convert_tokens_to_ids(__A\t)\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= BioGptTokenizer.from_pretrained(\"\"\"microsoft/biogpt\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.encode(\"\"\"sequence builders\"\"\"\t, add_special_tokens=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer.encode(\"\"\"multi-sequence build\"\"\"\t, add_special_tokens=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.build_inputs_with_special_tokens(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.build_inputs_with_special_tokens(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\tself.assertTrue(encoded_sentence == [2] + text\t)\r\n\t\t\t\t\t\tself.assertTrue(encoded_pair == [2] + text + [2] + text_a\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple 
docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport importlib\r\nimport json\r\nimport os\r\nimport sys\r\nimport tempfile\r\nimport unittest\r\nfrom pathlib import Path\r\n\r\nimport transformers\r\nimport transformers.models.auto\r\nfrom transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig\r\nfrom transformers.models.bert.configuration_bert import BertConfig\r\nfrom transformers.models.roberta.configuration_roberta import RobertaConfig\r\nfrom transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir\r\n\r\n\r\nsys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))\r\n\r\nfrom test_module.custom_configuration import CustomConfig # noqa E402\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= get_tests_dir('fixtures/dummy-config.json')\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself.assertIsNotNone(transformers.models.auto.__spec__\t)\r\n\t\t\t\t\t\tself.assertIsNotNone(importlib.util.find_spec(\"\"\"transformers.models.auto\"\"\"\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"bert-base-uncased\"\"\"\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= AutoConfig.for_model(\"\"\"roberta\"\"\"\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\t# This model name contains bert and roberta, but roberta ends up being picked.\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(__A\t, \"\"\"fake-roberta\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tos.makedirs(__A\t, exist_ok=__A\t)\r\n\t\t\t\t\t\t\t\t\twith open(os.path.join(__A\t, \"\"\"config.json\"\"\"\t)\t, \"\"\"w\"\"\"\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tf.write(json.dumps({}\t)\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(type(__A\t)\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"custom\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# Wrong model type will raise an error\r\n\t\t\t\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"model\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# Trying to register something existing in the Transformers library will raise an error\r\n\t\t\t\t\t\t\t\t\twith 
self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"bert\"\"\"\t, __A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Now that the config is registered, it can be used as any other config with the auto-API\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CustomConfig()\r\n\t\t\t\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\t\t\t\tconfig.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tif \"custom\" in CONFIG_MAPPING._extra_content:\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel CONFIG_MAPPING._extra_content[\"custom\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, \"\"\"bert-base is not a local folder and is not a valid model identifier\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"bert-base\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, r\"\"\"aaaaaa is not a valid git identifier \\(branch name, tag name or commit id\\)\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= AutoConfig.from_pretrained(__A\t, revision=\"\"\"aaaaaa\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, \"\"\"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.\"\"\"\t, ):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/no-config-test-repo\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# If remote code is not set, we will time out when asking whether to load the model.\r\n\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t)\r\n\t\t\t\t\t\t# If remote code is disabled, we can't load this config.\r\n\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Test config can be reloaded.\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\tconfig.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= AutoConfig.from_pretrained(__A\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\tself.assertEqual(reloaded_config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\t\"new-model\"\r\n\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"new-model\"\"\"\t, 
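__A\t)\r\n\t\t\t\t\t\t\t\t\t# NOTE (added): `AutoConfig.register` maps a `model_type` string to a config class, so a hypothetical sketch would be:\r\n\t\t\t\t\t\t\t\t\t#   AutoConfig.register("new-model", NewModelConfigLocal)\r\n\t\t\t\t\t\t\t\t\t#   AutoConfig.for_model("new-model")  # -> NewModelConfigLocal instance\r\n\t\t\t\t\t\t\t\t\t# The registration above was: AutoConfig.register("new-model", 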
__A\t)\r\n\t\t\t\t\t\t\t\t\t# If remote code is not set, the default is to use local\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfigLocal\"\"\"\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# If remote code is disabled, we load the local one.\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfigLocal\"\"\"\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# If remote is enabled, we load from the Hub\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tif \"new-model\" in CONFIG_MAPPING._extra_content:\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel CONFIG_MAPPING._extra_content[\"new-model\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":622,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 0\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 0\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= right or len(lowercase__\t\t) - 1\r\n\t\t\tif left > right:\r\n\t\t\t\t\t\treturn -1\r\n\t\t\telif list_data[left] == key:\r\n\t\t\t\t\t\treturn left\r\n\t\t\telif list_data[right] == key:\r\n\t\t\t\t\t\treturn right\r\n\t\t\telse:\r\n\t\t\t\t\t\treturn search(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tleft + 1\t\t\t\t,\t\t\t\t\tright - 1\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nimport unittest\r\n\r\nfrom transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (\r\n VOCAB_FILES_NAMES,\r\n GPTSanJapaneseTokenizer,\r\n)\r\nfrom transformers.testing_utils import require_tokenizers, slow\r\n\r\nfrom ...test_tokenization_common import TokenizerTesterMixin\r\n\r\n\r\n\r\n\r\n@require_tokenizers\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tGPTSanJapaneseTokenizer\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t{\"do_clean_text\": False, \"add_prefix_space\": False}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tsuper().setUp()\r\n\r\n\t\t\t\t\t\t# fmt: off\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [\"\"\"こん\"\"\", \"\"\"こんに\"\"\", \"\"\"にちは\"\"\", \"\"\"ばんは\"\"\", \"\"\"世界,㔺界\"\"\", \"\"\"、\"\"\", \"\"\"。\"\"\", \"\"\"<BR>\"\"\", \"\"\"<SP>\"\"\", \"\"\"<TAB>\"\"\", \"\"\"<URL>\"\"\", \"\"\"<EMAIL>\"\"\", \"\"\"<TEL>\"\"\", \"\"\"<DATE>\"\"\", \"\"\"<PRICE>\"\"\", \"\"\"<BLOCK>\"\"\", \"\"\"<KIGOU>\"\"\", \"\"\"<U2000U2BFF>\"\"\", \"\"\"<|emoji1|>\"\"\", \"\"\"<unk>\"\"\", \"\"\"<|bagoftoken|>\"\"\", \"\"\"<|endoftext|>\"\"\"]\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\"\"\"emoji\"\"\": {\"\"\"\\ud83d\\ude00\"\"\": \"\"\"<|emoji1|>\"\"\"}, \"\"\"emoji_inv\"\"\": {\"\"\"<|emoji1|>\"\"\": \"\"\"\\ud83d\\ude00\"\"\"}} # 😀\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {\"\"\"unk_token\"\"\": \"\"\"<unk>\"\"\"}\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"emoji_file\"\"\"]\t)\r\n\t\t\t\t\t\twith open(self.vocab_file\t, \"\"\"w\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as vocab_writer:\r\n\t\t\t\t\t\t\t\t\tvocab_writer.write(\"\"\"\"\"\".join([x + \"\"\"\\n\"\"\" for x in vocab_tokens]\t)\t)\r\n\t\t\t\t\t\twith open(self.emoji_file\t, \"\"\"w\"\"\"\t) as emoji_writer:\r\n\t\t\t\t\t\t\t\t\temoji_writer.write(json.dumps(__A\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tkwargs.update(self.special_tokens_map\t)\r\n\t\t\t\t\t\treturn GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。 \\nこんばんは、㔺界。😀\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんにちは、世界。 \\nこんばんは、世界。😀\"\"\"\r\n\t\t\t\t\t\treturn input_text, output_text\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_input_output_texts(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(__A\t, add_special_tokens=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.decode(__A\t, clean_up_tokenization_spaces=__A\t)\r\n\t\t\t\t\t\treturn text, ids\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんにちは、世界。　こんばんは、㔺界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\"\"\"こん\"\"\", \"\"\"にちは\"\"\", \"\"\"、\"\"\", \"\"\"世界\"\"\", \"\"\"。\"\"\", \"\"\"<SP>\"\"\", \"\"\"こん\"\"\", \"\"\"ばんは\"\"\", \"\"\"、\"\"\", \"\"\"㔺界\"\"\", \"\"\"。\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.tokenize(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids without special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [0, 
2, 5, 4, 6, 8, 0, 3, 5, 4, 6]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids with special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokens + [tokenizer.unk_token]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"こんにちは、、、、世界。こんばんは、、、、世界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.encode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"こんばんは、㔺界。😀\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。こんばんは、世界。😀\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer.encode(prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(__A\t, prefix_text=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんばんは、㔺界。😀\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [1] + [0] * (len_prefix + len_text + 1)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [1] * (len_prefix + len_text + 1) + [0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [1] + [1] * (len_prefix) + [0] * (len_text + 1)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(prefix_text + 
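input_text\t).token_type_ids\r\n\t\t\t\t\t\t# NOTE (added): GPTSAN is a prefix-LM, so `token_type_ids` mark the prefix segment: 1 for tokens attended to bidirectionally (the prefix plus the leading special token) and 0 for the part to be generated, matching the three masks built above.\r\n\t\t\t\t\t\t# Plain (no-prefix) call shown again for comparison: tokenizer(prefix_text + 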
input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(__A\t, prefix_text=__A\t).token_type_ids\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"あンいワ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=\"\"\"あンいワ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"いワ\"\"\"\t, prefix_text=\"\"\"あン\"\"\"\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[-1]\t) # SEG token\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[3]\t) # SEG token\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[\"\"\"武田信玄\"\"\", \"\"\"は、\"\"\"], [\"\"\"織田信長\"\"\", \"\"\"の配下の、\"\"\"]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer(__A\t, padding=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer.batch_encode_plus(__A\t, padding=__A\t)\r\n\r\n\t\t\t\t\t\t# fmt: off\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tself.assertListEqual(x_token.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.attention_mask\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.attention_mask\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# Intentionally convert some words to accommodate character fluctuations unique to Japanese\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# tokenizer has no padding token\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":623,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import 
annotations\r\n\r\nimport unittest\r\n\r\nfrom transformers import LEDConfig, is_tf_available\r\nfrom transformers.testing_utils import require_tf, slow\r\n\r\nfrom ...test_configuration_common import ConfigTester\r\nfrom ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor\r\nfrom ...test_pipeline_mixin import PipelineTesterMixin\r\n\r\n\r\nif is_tf_available():\r\n\t\t\t\timport tensorflow as tf\r\n\r\n\t\t\t\tfrom transformers import TFLEDForConditionalGeneration, TFLEDModel\r\n\r\n\r\n\r\n\r\n@require_tf\r\nclass _SCREAMING_SNAKE_CASE :\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\tLEDConfig\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\t{}\r\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\t\"gelu\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=13\t, __A=7\t, __A=True\t, __A=False\t, __A=99\t, __A=32\t, __A=2\t, __A=4\t, __A=37\t, __A=0.1\t, __A=0.1\t, __A=20\t, __A=2\t, __A=1\t, __A=0\t, __A=4\t, ) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= parent\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= batch_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= is_training\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= use_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= vocab_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= hidden_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= num_hidden_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= num_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= intermediate_size\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= hidden_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= attention_probs_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= max_position_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= eos_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= pad_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= bos_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= attention_window\r\n\r\n\t\t\t\t\t\t# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size\r\n\t\t\t\t\t\t# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention\r\n\t\t\t\t\t\t# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]\r\n\t\t\t\t\t\t# because its local attention only attends to `self.attention_window` and one before and one after\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.attention_window + 2\r\n\r\n\t\t\t\t\t\t# because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for\r\n\t\t\t\t\t\t# the `test_attention_outputs` and `test_hidden_states_output` tests\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= (\r\n\t\t\t\t\t\t self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window\r\n\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length - 1]\t, self.vocab_size\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size\t)\t, 1\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tf.concat([input_ids, eos_tensor]\t, axis=1\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.vocab_size\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.config_cls(\r\n\t\t\t\t\t\t vocab_size=self.vocab_size\t, d_model=self.hidden_size\t, encoder_layers=self.num_hidden_layers\t, decoder_layers=self.num_hidden_layers\t, encoder_attention_heads=self.num_attention_heads\t, decoder_attention_heads=self.num_attention_heads\t, encoder_ffn_dim=self.intermediate_size\t, decoder_ffn_dim=self.intermediate_size\t, dropout=self.hidden_dropout_prob\t, attention_dropout=self.attention_probs_dropout_prob\t, max_position_embeddings=self.max_position_embeddings\t, eos_token_ids=[2]\t, bos_token_id=self.bos_token_id\t, pad_token_id=self.pad_token_id\t, decoder_start_token_id=self.pad_token_id\t, attention_window=self.attention_window\t, **self.config_updates\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= prepare_led_inputs_dict(__A\t, __A\t, __A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tf.concat(\r\n\t\t\t\t\t\t [tf.zeros_like(__A\t)[:, :-1], tf.ones_like(__A\t)[:, -1:]]\t, axis=-1\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= global_attention_mask\r\n\t\t\t\t\t\treturn config, inputs_dict\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= TFLEDModel(config=__A\t).get_decoder()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= inputs_dict[\"\"\"input_ids\"\"\"]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= input_ids[:1, :]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= inputs_dict[\"\"\"attention_mask\"\"\"][:1, :]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 1\r\n\r\n\t\t\t\t\t\t# first forward pass\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= model(__A\t, attention_mask=__A\t, use_cache=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= outputs.to_tuple()\r\n\r\n\t\t\t\t\t\t# create hypothetical next token and extent to next_input_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ids_tensor((self.batch_size, 3)\t, config.vocab_size\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tf.cast(ids_tensor((self.batch_size, 3)\t, 2\t)\t, tf.inta\t)\r\n\r\n\t\t\t\t\t\t# append to next input_ids and\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tf.concat([input_ids, next_tokens]\t, axis=-1\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tf.concat([attention_mask, next_attn_mask]\t, axis=-1\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model(__A\t, attention_mask=__A\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= model(__A\t, attention_mask=__A\t, 
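past_key_values=__A\t)[0]\r\n\r\n\t\t\t\t\t\t# NOTE (added): the comparison below verifies the KV cache — logits from one forward over the concatenated sequence must match logits computed incrementally with `past_key_values`, checked on a random vocab slice.\r\n\t\t\t\t\t\t# Cached call above, for reference: model(next_tokens, attention_mask=..., 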
past_key_values=__A\t)[0]\r\n\r\n\t\t\t\t\t\tself.parent.assertEqual(next_tokens.shape[1]\t, output_from_past.shape[1]\t)\r\n\r\n\t\t\t\t\t\t# select random slice\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= int(ids_tensor((1,)\t, output_from_past.shape[-1]\t)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= output_from_no_past[:, -3:, random_slice_idx]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= output_from_past[:, :, random_slice_idx]\r\n\r\n\t\t\t\t\t\t# test that outputs are equal for slice\r\n\t\t\t\t\t\ttf.debugging.assert_near(__A\t, __A\t, rtol=1E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Any=None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int=None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str=None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[int]=None\t\t\t\t,\t\t\t\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif attention_mask is None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tf.cast(tf.math.not_equal(lowercase__\t\t\t\t,\t\t\t\t\tconfig.pad_token_id\t\t)\t\t\t\t,\t\t\t\t\ttf.inta\t\t)\r\n\t\t\tif decoder_attention_mask is None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tf.concat(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t tf.ones(decoder_input_ids[:, :1].shape\t\t\t\t,\t\t\t\t\tdtype=tf.inta\t\t),\r\n\t\t\t\t\t\t tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:]\t\t\t\t,\t\t\t\t\tconfig.pad_token_id\t\t)\t\t\t\t,\t\t\t\t\ttf.inta\t\t),\r\n\t\t\t\t\t\t ]\t\t\t\t,\t\t\t\t\taxis=-1\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tif head_mask is None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tf.ones((config.encoder_layers, config.encoder_attention_heads)\t\t)\r\n\t\t\tif decoder_head_mask is None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tf.ones((config.decoder_layers, config.decoder_attention_heads)\t\t)\r\n\t\t\treturn {\r\n\t\t\t \"input_ids\": input_ids,\r\n\t\t\t \"attention_mask\": attention_mask,\r\n\t\t\t \"decoder_input_ids\": decoder_input_ids,\r\n\t\t\t \"decoder_attention_mask\": decoder_attention_mask,\r\n\t\t\t \"head_mask\": head_mask,\r\n\t\t\t \"decoder_head_mask\": decoder_head_mask,\r\n\t\t\t}\r\n\r\n\r\n\r\n\r\n@require_tf\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\t(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t(TFLEDForConditionalGeneration,) if is_tf_available() else ()\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t {\r\n\t\t\t \"conversational\": TFLEDForConditionalGeneration,\r\n\t\t\t \"feature-extraction\": TFLEDModel,\r\n\t\t\t \"summarization\": TFLEDForConditionalGeneration,\r\n\t\t\t \"text2text-generation\": TFLEDForConditionalGeneration,\r\n\t\t\t \"translation\": TFLEDForConditionalGeneration,\r\n\t\t\t }\r\n\t\t\t if is_tf_available()\r\n\t\t\t else {}\r\n\t\t\t)\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tTrue\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tFalse\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= TFLEDModelTester(self\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ 
:List[str] \t\t\t\t\t= ConfigTester(self\t, config_class=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tself.config_tester.run_common_tests()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.model_tester.prepare_config_and_inputs_for_common()\r\n\t\t\t\t\t\tself.model_tester.check_decoder_model_past_large_inputs(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= self.model_tester.prepare_config_and_inputs_for_common()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tf.zeros_like(inputs_dict[\"\"\"attention_mask\"\"\"]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tf.where(\r\n\t\t\t\t\t\t tf.range(self.model_tester.seq_length\t)[None, :] < num_global_attn_indices\t, 1\t, inputs_dict[\"\"\"global_attention_mask\"\"\"]\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= True\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.model_tester.seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.model_tester.encoder_seq_length\r\n\r\n\t\t\t\t\t\tdef check_decoder_attentions_output(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= outputs.decoder_attentions\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(len(__A\t)\t, self.model_tester.num_hidden_layers\t)\r\n\t\t\t\t\t\t\t\t\tself.assertListEqual(\r\n\t\t\t\t\t\t\t\t\t list(decoder_attentions[0].shape[-3:]\t)\t, [self.model_tester.num_attention_heads, seq_length, seq_length]\t, )\r\n\r\n\t\t\t\t\t\tdef check_encoder_attentions_output(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [t.numpy() for t in outputs.encoder_attentions]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= [t.numpy() for t in outputs.encoder_global_attentions]\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(len(__A\t)\t, self.model_tester.num_hidden_layers\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(len(__A\t)\t, self.model_tester.num_hidden_layers\t)\r\n\t\t\t\t\t\t\t\t\tself.assertListEqual(\r\n\t\t\t\t\t\t\t\t\t list(attentions[0].shape[-3:]\t)\t, [self.model_tester.num_attention_heads, seq_length, seq_length]\t, )\r\n\t\t\t\t\t\t\t\t\tself.assertListEqual(\r\n\t\t\t\t\t\t\t\t\t list(global_attentions[0].shape[-3:]\t)\t, [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices]\t, )\r\n\r\n\t\t\t\t\t\tfor model_class in self.all_model_classes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= True\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= False\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= False\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model_class(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model(self._prepare_for_class(__A\t, __A\t)\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= len(__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.output_hidden_states\t, __A\t)\r\n\t\t\t\t\t\t\t\t\tcheck_encoder_attentions_output(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\tif self.is_encoder_decoder:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model_class(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model(self._prepare_for_class(__A\t, __A\t)\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(config.output_hidden_states\t, 
__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tcheck_decoder_attentions_output(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Check that output attentions can also be changed via the config\r\n\t\t\t\t\t\t\t\t\tdel inputs_dict[\"output_attentions\"]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= True\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model_class(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= model(self._prepare_for_class(__A\t, __A\t)\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.output_hidden_states\t, __A\t)\r\n\t\t\t\t\t\t\t\t\tcheck_encoder_attentions_output(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Check attention is always last and order is fine\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= True\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= True\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model_class(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= model(self._prepare_for_class(__A\t, __A\t)\t)\r\n\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(out_len + (2 if self.is_encoder_decoder else 1)\t, len(__A\t)\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(model.config.output_hidden_states\t, __A\t)\r\n\t\t\t\t\t\t\t\t\tcheck_encoder_attentions_output(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skip(\"\"\"LED keeps using potentially symbolic tensors in conditionals and breaks tracing.\"\"\"\t)\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\t# TODO: Head-masking not yet implement\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn tf.constant(lowercase__\t\t\t\t,\t\t\t\t\tdtype=tf.intaa\t\t)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1e-4\r\n\r\n\r\n\r\n\r\n@slow\r\n@require_tf\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= TFLEDForConditionalGeneration.from_pretrained(\"\"\"allenai/led-base-16384\"\"\"\t).led\r\n\r\n\t\t\t\t\t\t# change to intended input here\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= prepare_led_inputs_dict(model.config\t, __A\t, __A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= model(**__A\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= (1, 1024, 768)\r\n\t\t\t\t\t\tself.assertEqual(output.shape\t, __A\t)\r\n\t\t\t\t\t\t# change to expected output here\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tf.convert_to_tensor(\r\n\t\t\t\t\t\t [[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]]\t, )\r\n\t\t\t\t\t\ttf.debugging.assert_near(output[:, :3, :3]\t, __A\t, atol=1E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= TFLEDForConditionalGeneration.from_pretrained(\"\"\"allenai/led-base-16384\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# change to intended input here\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 
_long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= prepare_led_inputs_dict(model.config\t, __A\t, __A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model(**__A\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= (1, 1024, model.config.vocab_size)\r\n\t\t\t\t\t\tself.assertEqual(output.shape\t, __A\t)\r\n\t\t\t\t\t\t# change to expected output here\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tf.convert_to_tensor(\r\n\t\t\t\t\t\t [[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]]\t, )\r\n\t\t\t\t\t\ttf.debugging.assert_near(output[:, :3, :3]\t, __A\t, atol=1E-3\t, rtol=1E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Fitting Polynomial Regression to the dataset\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\n\r\n# Importing the dataset\r\n__UpperCAmelCase\t\t\t\t\t\t\t= pd.read_csv(\r\n 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'\r\n 'position_salaries.csv'\r\n)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= dataset.iloc[:, 1:2].values\r\n__UpperCAmelCase\t\t\t\t\t\t\t= dataset.iloc[:, 2].values\r\n\r\n\r\n__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= train_test_split(X, y, test_size=0.2, random_state=0)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= PolynomialFeatures(degree=4)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= poly_reg.fit_transform(X)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= LinearRegression()\r\npol_reg.fit(X_poly, y)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tplt.scatter(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tcolor=\"\"\"red\"\"\"\t\t)\r\n\t\t\tplt.plot(lowercase__\t\t\t\t,\t\t\t\t\tpol_reg.predict(poly_reg.fit_transform(lowercase__\t\t)\t\t)\t\t\t\t,\t\t\t\t\tcolor=\"\"\"blue\"\"\"\t\t)\r\n\t\t\tplt.title(\"\"\"Truth or Bluff (Linear Regression)\"\"\"\t\t)\r\n\t\t\tplt.xlabel(\"\"\"Position level\"\"\"\t\t)\r\n\t\t\tplt.ylabel(\"\"\"Salary\"\"\"\t\t)\r\n\t\t\tplt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tviz_polymonial()\r\n\r\n\t\t\t\t# Predicting a new result with Polymonial Regression\r\n\t\t\t\tpol_reg.predict(poly_reg.fit_transform([[5.5]]))\r\n\t\t\t\t# output should be 132148.43750003\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":624,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import Normalizer\r\nfrom sklearn.svm import SVR\r\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: 
list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t) -> float:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= np.array([[1, item, train_mtch[i]] for i, item in enumerate(lowercase__\t\t)]\t\t)\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= np.array(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= np.dot(np.dot(np.linalg.inv(np.dot(x.transpose()\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\t\t\t\t,\t\t\t\t\tx.transpose()\t\t)\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\treturn abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2]\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t) -> float:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (1, 2, 1)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= (1, 1, 0, 7)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= SARIMAX(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\texog=lowercase__\t\t\t\t,\t\t\t\t\torder=lowercase__\t\t\t\t,\t\t\t\t\tseasonal_order=lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model.fit(disp=lowercase__\t\t\t\t,\t\t\t\t\tmaxiter=6_0_0\t\t\t\t,\t\t\t\t\tmethod=\"\"\"nm\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model_fit.predict(1\t\t\t\t,\t\t\t\t\tlen(lowercase__\t\t)\t\t\t\t,\t\t\t\t\texog=[test_match]\t\t)\r\n\t\t\treturn result[0]\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t) -> float:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= SVR(kernel=\"\"\"rbf\"\"\"\t\t\t\t,\t\t\t\t\tC=1\t\t\t\t,\t\t\t\t\tgamma=0.1\t\t\t\t,\t\t\t\t\tepsilon=0.1\t\t)\r\n\t\t\tregressor.fit(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= regressor.predict(lowercase__\t\t)\r\n\t\t\treturn y_pred[0]\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t) -> float:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\ttrain_user.sort()\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= np.percentile(lowercase__\t\t\t\t,\t\t\t\t\t2_5\t\t)\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= np.percentile(lowercase__\t\t\t\t,\t\t\t\t\t7_5\t\t)\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= qa - qa\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= qa - (iqr * 0.1)\r\n\t\t\treturn low_lim\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 0\r\n\t\t\tfor i in list_vote:\r\n\t\t\t\t\t\tif i > actual_result:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= not_safe + 1\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tif abs(abs(lowercase__\t\t) - abs(lowercase__\t\t)\t\t) <= 0.1:\r\n\t\t\t\t\t\t\t\t\t\t\t\tsafe += 1\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tnot_safe += 1\r\n\t\t\treturn safe > not_safe\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t# data_input_df = pd.read_csv(\"ex_data.csv\", header=None)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 
pd.DataFrame(\r\n\t\t\t\t data_input, columns=['total_user', 'total_even', 'days']\r\n\t\t\t\t)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Normalizer().fit_transform(data_input_df.values)\r\n\t\t\t\t# split data\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= normalize_df[:, 2].tolist()\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= normalize_df[:, 0].tolist()\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= normalize_df[:, 1].tolist()\r\n\r\n\t\t\t\t# for svr (input variable = total date and total match)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= normalize_df[:, [1, 2]].tolist()\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= x[: len(x) - 1]\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= x[len(x) - 1 :]\r\n\r\n\t\t\t\t# for linear regression & sarimax\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= total_date[: len(total_date) - 1]\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= total_user[: len(total_user) - 1]\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= total_match[: len(total_match) - 1]\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= total_date[len(total_date) - 1 :]\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= total_user[len(total_user) - 1 :]\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= total_match[len(total_match) - 1 :]\r\n\r\n\t\t\t\t# voting system with forecasting\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t linear_regression_prediction(\r\n\t\t\t\t trn_date, trn_user, trn_match, tst_date, tst_match\r\n\t\t\t\t ),\r\n\t\t\t\t sarimax_predictor(trn_user, trn_match, tst_match),\r\n\t\t\t\t support_vector_regressor(x_train, x_test, trn_user),\r\n\t\t\t\t]\r\n\r\n\t\t\t\t# check the safety of today's data\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= '' if data_safety_checker(res_vote, tst_user) else 'not '\r\n\t\t\t\tprint(f'Today\'s data is {not_str}safe.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1.6021e-19 # units = C\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\t) -> tuple[str, float]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif (conductivity, electron_conc, mobility).count(0\t\t) != 1:\r\n\t\t\t\t\t\traise ValueError(\"\"\"You cannot supply more or less than 2 values\"\"\"\t\t)\r\n\t\t\telif conductivity < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Conductivity cannot be negative\"\"\"\t\t)\r\n\t\t\telif electron_conc < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Electron concentration cannot be negative\"\"\"\t\t)\r\n\t\t\telif mobility < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"mobility cannot be negative\"\"\"\t\t)\r\n\t\t\telif conductivity == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"conductivity\",\r\n\t\t\t\t\t\t mobility * electron_conc * ELECTRON_CHARGE,\r\n\t\t\t\t\t\t)\r\n\t\t\telif electron_conc == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"electron_conc\",\r\n\t\t\t\t\t\t conductivity / (mobility * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"mobility\",\r\n\t\t\t\t\t\t conductivity / (electron_conc * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport 
doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":625,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import List, Optional, Union\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom .utils import logging\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Union[tf.Tensor, np.ndarray]\t\t) -> List[int]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif isinstance(lowercase__\t\t\t\t,\t\t\t\t\tnp.ndarray\t\t):\r\n\t\t\t\t\t\treturn list(tensor.shape\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tf.shape(lowercase__\t\t)\r\n\r\n\t\t\tif tensor.shape == tf.TensorShape(lowercase__\t\t):\r\n\t\t\t\t\t\treturn dynamic\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tensor.shape.as_list()\r\n\r\n\t\t\treturn [dynamic[i] if s is None else s for i, s in enumerate(lowercase__\t\t)]\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: tf.Tensor\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[int] = None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[str] = None\t\t) -> tf.Tensor:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn tf.nn.softmax(logits=logits + 1E-9\t\t\t\t,\t\t\t\t\taxis=lowercase__\t\t\t\t,\t\t\t\t\tname=lowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Any\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str=1E-5\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict=-1\t\t) -> List[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t):\r\n\t\t\t\t\t\traise NotImplementedError(\"\"\"Only 1D weight and bias tensors are supported for now, with only a single axis.\"\"\"\t\t)\r\n\r\n\t\t\t# Get mean and variance on the axis to be normalized\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tf.nn.moments(lowercase__\t\t\t\t,\t\t\t\t\taxes=[axis]\t\t\t\t,\t\t\t\t\tkeepdims=lowercase__\t\t)\r\n\r\n\t\t\tif axis != -1:\r\n\t\t\t\t\t\t# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions\r\n\t\t\t\t\t\t# on every dimension except axis\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= [1] * inputs.shape.rank\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= shape_list(lowercase__\t\t)[axis]\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tf.reshape(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tf.reshape(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Compute layer normalization using the batch_normalization\r\n\t\t\t# function.\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tf.nn.batch_normalization(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\toffset=lowercase__\t\t\t\t,\t\t\t\t\tscale=lowercase__\t\t\t\t,\t\t\t\t\tvariance_epsilon=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\treturn outputs\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str=0\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple=-1\t\t) -> 
List[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif end_dim < 0:\r\n\t\t\t\t\t\tend_dim += input.shape.rank\r\n\t\t\tif start_dim < 0:\r\n\t\t\t\t\t\tstart_dim += input.shape.rank\r\n\r\n\t\t\tif start_dim == end_dim:\r\n\t\t\t\t\t\treturn input\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tf.shape(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tf.math.reduce_prod(in_shape[start_dim : end_dim + 1]\t\t)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]]\t\t\t\t,\t\t\t\t\taxis=0\t\t)\r\n\t\t\treturn tf.reshape(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: tf.Tensor\t\t) -> tf.Tensor:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not isinstance(lowercase__\t\t\t\t,\t\t\t\t\ttf.Tensor\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tf.convert_to_tensor(lowercase__\t\t) # Catches stray NumPy inputs\r\n\t\t\tif encoder_attention_mask.shape.rank == 3:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= encoder_attention_mask[:, None, :, :]\r\n\t\t\tif encoder_attention_mask.shape.rank == 2:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= encoder_attention_mask[:, None, None, :]\r\n\t\t\t# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition\r\n\t\t\t# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow\r\n\t\t\t# /transformer/transformer_layers.py#L270\r\n\t\t\t# encoder_extended_attention_mask = (encoder_extended_attention_mask ==\r\n\t\t\t# encoder_extended_attention_mask.transpose(-1, -2))\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= (\r\n\t\t\t tf.cast(1\t\t\t\t,\t\t\t\t\tencoder_attention_mask.dtype\t\t) - encoder_extended_attention_mask\r\n\t\t\t) * encoder_extended_attention_mask.dtype.min\r\n\r\n\t\t\treturn encoder_extended_attention_mask\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: tf.Tensor\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str = \"input_ids\"\t\t) -> None:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\ttf.debugging.assert_less(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\ttf.cast(lowercase__\t\t\t\t,\t\t\t\t\tdtype=tensor.dtype\t\t)\t\t\t\t,\t\t\t\t\tmessage=(\r\n\t\t\t f\"\"\"The maximum value of {tensor_name} ({tf.math.reduce_max(lowercase__\t\t)}) must be smaller than the embedding \"\"\"\r\n\t\t\t f\"\"\"layer's input dimension ({embed_dim}). 
The likely cause is some problem at tokenization time.\"\"\"\r\n\t\t\t )\t\t\t\t,\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 6_4_5_1_2\r\n\t\t\t# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`\r\n\t\t\t# because in that case even chunking the array would not make the saving\r\n\t\t\t# possible.\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [x for x in data if len(lowercase__\t\t) > HDF5_OBJECT_HEADER_LIMIT]\r\n\r\n\t\t\t# Expecting this to never be true.\r\n\t\t\tif bad_attributes:\r\n\t\t\t\t\t\traise RuntimeError(\r\n\t\t\t\t\t\t \"\"\"The following attributes cannot be saved to HDF5 file because \"\"\"\r\n\t\t\t\t\t\t f\"\"\"they are larger than {HDF5_OBJECT_HEADER_LIMIT} \"\"\"\r\n\t\t\t\t\t\t f\"\"\"bytes: {bad_attributes}\"\"\"\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= np.asarray(lowercase__\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 1\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= np.array_split(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# This will never loop forever thanks to the test above.\r\n\t\t\twhile any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data\t\t):\r\n\t\t\t\t\t\tnum_chunks += 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= np.array_split(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\tif num_chunks > 1:\r\n\t\t\t\t\t\tfor chunk_id, chunk_data in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= chunk_data\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= data\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t) -> Tuple:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif name in group.attrs:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [n.decode(\"\"\"utf8\"\"\"\t\t) if hasattr(lowercase__\t\t\t\t,\t\t\t\t\t\"\"\"decode\"\"\"\t\t) else n for n in group.attrs[name]]\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= []\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 0\r\n\t\t\t\t\t\twhile \"%s%d\" % (name, chunk_id) in group.attrs:\r\n\t\t\t\t\t\t\t\t\tdata.extend(\r\n\t\t\t\t\t\t\t\t\t [n.decode(\"\"\"utf8\"\"\"\t\t) if hasattr(lowercase__\t\t\t\t,\t\t\t\t\t\"\"\"decode\"\"\"\t\t) else n for n in group.attrs[\"\"\"%s%d\"\"\" % (name, chunk_id)]]\t\t)\r\n\t\t\t\t\t\t\t\t\tchunk_id += 1\r\n\t\t\treturn data\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef _expand_single_ad_tensor(lowercase__\t\t: Union[str, Any]\t\t):\r\n\t\t\t\t\t\tif isinstance(lowercase__\t\t\t\t,\t\t\t\t\ttf.Tensor\t\t) and t.shape.rank == 1:\r\n\t\t\t\t\t\t\t\t\treturn tf.expand_dims(lowercase__\t\t\t\t,\t\t\t\t\taxis=-1\t\t)\r\n\t\t\t\t\t\treturn t\r\n\r\n\t\t\treturn tf.nest.map_structure(_expand_single_ad_tensor\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport warnings\r\n\r\nfrom ...utils import logging\r\nfrom .image_processing_clip import 
CLIPImageProcessor\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, *__A\t, **__A\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\twarnings.warn(\r\n\t\t\t\t\t\t \"\"\"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please\"\"\"\r\n\t\t\t\t\t\t \"\"\" use CLIPImageProcessor instead.\"\"\"\t, __A\t, )\r\n\t\t\t\t\t\tsuper().__init__(*__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":626,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport warnings\r\nfrom functools import wraps\r\nfrom typing import Callable\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Callable\t\t) -> Callable:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@wraps(lowercase__\t\t)\r\n\t\t\tdef _inner_fn(*lowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\t**lowercase__\t\t: Optional[Any]\t\t):\r\n\t\t\t\t\t\twarnings.warn(\r\n\t\t\t\t\t\t (f\"\"\"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.\"\"\")\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t\t\t\treturn fn(*lowercase__\t\t\t\t,\t\t\t\t\t**lowercase__\t\t)\r\n\r\n\t\t\treturn _inner_fn\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom itertools import zip_longest\r\n\r\nimport requests\r\nfrom bsa import BeautifulSoup\r\nfrom pandas import DataFrame\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = \"laptop\"\t\t) -> DataFrame:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= f\"\"\"https://www.amazon.in/laptop/s?k={product}\"\"\"\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t \"\"\"User-Agent\"\"\": \"\"\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36\"\"\",\r\n\t\t\t \"\"\"Accept-Language\"\"\": \"\"\"en-US, en;q=0.5\"\"\",\r\n\t\t\t}\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= BeautifulSoup(requests.get(lowercase__\t\t\t\t,\t\t\t\t\theaders=lowercase__\t\t).text\t\t)\r\n\t\t\t# Initialize a Pandas dataframe with the column titles\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= DataFrame(\r\n\t\t\t columns=[\r\n\t\t\t \"\"\"Product Title\"\"\",\r\n\t\t\t \"\"\"Product Link\"\"\",\r\n\t\t\t \"\"\"Current Price of the product\"\"\",\r\n\t\t\t \"\"\"Product Rating\"\"\",\r\n\t\t\t \"\"\"MRP of the product\"\"\",\r\n\t\t\t \"\"\"Discount\"\"\",\r\n\t\t\t ]\t\t)\r\n\t\t\t# Loop through each entry and store them in the dataframe\r\n\t\t\tfor item, _ in zip_longest(\r\n\t\t\t soup.find_all(\r\n\t\t\t \"\"\"div\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"s-result-item\"\"\", \"\"\"data-component-type\"\"\": \"\"\"s-search-result\"\"\"}\t\t\t\t,\t\t\t\t\t)\t\t\t\t,\t\t\t\t\tsoup.find_all(\"\"\"div\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-row a-size-base a-color-base\"\"\"}\t\t)\t\t\t\t,\t\t\t\t\t):\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= item.ha.text\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"https://www.amazon.in/\"\"\" + 
item.ha.a[\"\"\"href\"\"\"]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-offscreen\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-icon-alt\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"Not available\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"โ‚น\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t + item.find(\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-price a-text-price\"\"\"}\t\t).text.split(\"\"\"โ‚น\"\"\"\t\t)[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= float(\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t float(product_mrp.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t - float(product_price.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t )\r\n\t\t\t\t\t\t\t\t\t\t\t\t / float(product_mrp.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t )\r\n\t\t\t\t\t\t\t\t\t\t\t\t * 1_0_0\t\t)\r\n\t\t\t\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= float(\"\"\"nan\"\"\"\t\t)\r\n\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\r\n\t\t\t\t\t\t product_title,\r\n\t\t\t\t\t\t product_link,\r\n\t\t\t\t\t\t product_price,\r\n\t\t\t\t\t\t product_rating,\r\n\t\t\t\t\t\t product_mrp,\r\n\t\t\t\t\t\t discount,\r\n\t\t\t\t\t\t]\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tdata_frame.index += 1\r\n\t\t\treturn data_frame\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 'headphones'\r\n\t\t\t\tget_amazon_product_data(product).to_csv(F\"\"\"Amazon Product Data for {product}.csv\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":627,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport inspect\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom transformers import ViTConfig, is_flax_available\r\nfrom transformers.testing_utils import require_flax, slow\r\n\r\nfrom ...test_configuration_common import ConfigTester\r\nfrom ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor\r\n\r\n\r\nif is_flax_available():\r\n\t\t\t\timport jax\r\n\r\n\t\t\t\tfrom transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=13\t, __A=30\t, __A=2\t, __A=3\t, __A=True\t, __A=True\t, __A=32\t, __A=5\t, __A=4\t, __A=37\t, __A=\"gelu\"\t, __A=0.1\t, __A=0.1\t, __A=10\t, 
__A=0.0_2\t, ) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= parent\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= batch_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= image_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= patch_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= num_channels\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= is_training\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= use_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= hidden_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= num_hidden_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= num_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= intermediate_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= hidden_act\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= hidden_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= attention_probs_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= type_sequence_label_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= initializer_range\r\n\r\n\t\t\t\t\t\t# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= (image_size // patch_size) ** 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= num_patches + 1\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= ViTConfig(\r\n\t\t\t\t\t\t image_size=self.image_size\t, patch_size=self.patch_size\t, num_channels=self.num_channels\t, hidden_size=self.hidden_size\t, num_hidden_layers=self.num_hidden_layers\t, num_attention_heads=self.num_attention_heads\t, intermediate_size=self.intermediate_size\t, hidden_act=self.hidden_act\t, hidden_dropout_prob=self.hidden_dropout_prob\t, attention_probs_dropout_prob=self.attention_probs_dropout_prob\t, is_decoder=__A\t, initializer_range=self.initializer_range\t, )\r\n\r\n\t\t\t\t\t\treturn config, pixel_values\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= FlaxViTModel(config=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model(__A\t)\r\n\t\t\t\t\t\t# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= (self.image_size, self.image_size)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= (self.patch_size, self.patch_size)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\r\n\t\t\t\t\t\tself.parent.assertEqual(result.last_hidden_state.shape\t, (self.batch_size, num_patches + 1, self.hidden_size)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.type_sequence_label_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= FlaxViTForImageClassification(config=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= model(__A\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.logits.shape\t, (self.batch_size, self.type_sequence_label_size)\t)\r\n\r\n\t\t\t\t\t\t# test greyscale 
images\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= FlaxViTForImageClassification(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= floats_tensor([self.batch_size, 1, self.image_size, self.image_size]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= model(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.prepare_config_and_inputs()\r\n\t\t\t\t\t\t(\r\n\t\t\t\t\t\t (\r\n\t\t\t\t\t\t lowerCAmelCase_\r\n\t\t\t\t\t\t)\t\t\t\t\t,\t(\r\n\t\t\t\t\t\t lowerCAmelCase_\r\n\t\t\t\t\t\t)\t\t\t\t\t,\t\r\n\t\t\t\t\t\t) :Any \t\t\t\t\t= config_and_inputs\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= {\"\"\"pixel_values\"\"\": pixel_values}\r\n\t\t\t\t\t\treturn config, inputs_dict\r\n\r\n\r\n\r\n\r\n@require_flax\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t(FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= FlaxViTModelTester(self\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ConfigTester(self\t, config_class=__A\t, has_text_modality=__A\t, hidden_size=37\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tself.config_tester.run_common_tests()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_model(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_for_image_classification(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs_for_common()\r\n\r\n\t\t\t\t\t\tfor model_class in self.all_model_classes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model_class(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= inspect.signature(model.__call__\t)\r\n\t\t\t\t\t\t\t\t\t# signature.parameters is an OrderedDict => so arg_names order is deterministic\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [*signature.parameters.keys()]\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [\"\"\"pixel_values\"\"\"]\r\n\t\t\t\t\t\t\t\t\tself.assertListEqual(arg_names[:1]\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs_for_common()\r\n\r\n\t\t\t\t\t\tfor model_class in self.all_model_classes:\r\n\t\t\t\t\t\t\t\t\twith self.subTest(model_class.__name__\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self._prepare_for_class(__A\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 
model_class(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t@jax.jit\r\n\t\t\t\t\t\t\t\t\t\t\t\tdef model_jitted(__A\t, **__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn model(pixel_values=__A\t, **__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\twith self.subTest(\"\"\"JIT Enabled\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= model_jitted(**__A\t).to_tuple()\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\twith self.subTest(\"\"\"JIT Disabled\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twith jax.disable_jit():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model_jitted(**__A\t).to_tuple()\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(len(__A\t)\t, len(__A\t)\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tfor jitted_output, output in zip(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(jitted_output.shape\t, output.shape\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tfor model_class_name in self.all_model_classes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= model_class_name.from_pretrained(\"\"\"google/vit-base-patch16-224\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= model(np.ones((1, 3, 224, 224)\t)\t)\r\n\t\t\t\t\t\t\t\t\tself.assertIsNotNone(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport shutil\r\nimport tempfile\r\nimport unittest\r\n\r\nfrom transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast\r\nfrom transformers.testing_utils import require_sentencepiece, require_torchaudio\r\n\r\nfrom .test_feature_extraction_clap import floats_list\r\n\r\n\r\n\r\n\r\n@require_torchaudio\r\n@require_sentencepiece\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"laion/clap-htsat-unfused\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tempfile.mkdtemp()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\treturn RobertaTokenizer.from_pretrained(self.checkpoint\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn ClapFeatureExtractor.from_pretrained(self.checkpoint\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tshutil.rmtree(self.tmpdirname\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_tokenizer()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tprocessor.save_pretrained(self.tmpdirname\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ClapProcessor.from_pretrained(self.tmpdirname\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.tokenizer.get_vocab()\t, tokenizer.get_vocab()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.tokenizer\t, __A\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.feature_extractor.to_json_string()\t, 
feature_extractor.to_json_string()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.feature_extractor\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= ClapProcessor(tokenizer=self.get_tokenizer()\t, feature_extractor=self.get_feature_extractor()\t)\r\n\t\t\t\t\t\tprocessor.save_pretrained(self.tmpdirname\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_tokenizer(bos_token=\"\"\"(BOS)\"\"\"\t, eos_token=\"\"\"(EOS)\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.get_feature_extractor(do_normalize=__A\t, padding_value=1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ClapProcessor.from_pretrained(\r\n\t\t\t\t\t\t self.tmpdirname\t, bos_token=\"\"\"(BOS)\"\"\"\t, eos_token=\"\"\"(EOS)\"\"\"\t, do_normalize=__A\t, padding_value=1.0\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.tokenizer.get_vocab()\t, tokenizer_add_kwargs.get_vocab()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.tokenizer\t, __A\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.feature_extractor.to_json_string()\t, feature_extractor_add_kwargs.to_json_string()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.feature_extractor\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= floats_list((3, 1000)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= feature_extractor(__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= processor(audios=__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\r\n\t\t\t\t\t\tfor key in input_feat_extract.keys():\r\n\t\t\t\t\t\t\t\t\tself.assertAlmostEqual(input_feat_extract[key].sum()\t, input_processor[key].sum()\t, delta=1E-2\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"This is a test string\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= processor(text=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(__A\t)\r\n\r\n\t\t\t\t\t\tfor key in encoded_tok.keys():\r\n\t\t\t\t\t\t\t\t\tself.assertListEqual(encoded_tok[key]\t, encoded_processor[key]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= processor.batch_decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 
tokenizer.batch_decode(__A\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(\r\n\t\t\t\t\t\t processor.model_input_names[2:]\t, feature_extractor.model_input_names\t, msg=\"\"\"`processor` and `feature_extractor` model input names do not match\"\"\"\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":628,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport sys\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= (\r\n '73167176531330624919225119674426574742355349194934'\r\n '96983520312774506326239578318016984801869478851843'\r\n '85861560789112949495459501737958331952853208805511'\r\n '12540698747158523863050715693290963295227443043557'\r\n '66896648950445244523161731856403098711121722383113'\r\n '62229893423380308135336276614282806444486645238749'\r\n '30358907296290491560440772390713810515859307960866'\r\n '70172427121883998797908792274921901699720888093776'\r\n '65727333001053367881220235421809751254540594752243'\r\n '52584907711670556013604839586446706324415722155397'\r\n '53697817977846174064955149290862569321978468622482'\r\n '83972241375657056057490261407972968652414535100474'\r\n '82166370484403199890008895243450658541227588666881'\r\n '16427171479924442928230863465674813919123162824586'\r\n '17866458359124566529476545682848912883142607690042'\r\n '24219022671055626321111109370544217506941658960408'\r\n '07198403850962455444362981230987879927244284909188'\r\n '84580156166097919133875499200524063689912560717606'\r\n '05886116467109405077541002256983155200055935729725'\r\n '71636269561882670428252483600823257530420752963450'\r\n)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 1\r\n\t\t\tfor digit in s:\r\n\t\t\t\t\t\tproduct *= int(lowercase__\t\t)\r\n\t\t\treturn product\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = N\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= -sys.maxsize - 1\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= n[:1_3]\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 1_3\r\n\t\t\twhile cur_index < len(lowercase__\t\t) - 1_3:\r\n\t\t\t\t\t\tif int(n[cur_index]\t\t) >= int(substr[0]\t\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= substr[1:] + n[cur_index]\r\n\t\t\t\t\t\t\t\t\tcur_index += 1\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= max(lowercase__\t\t\t\t,\t\t\t\t\tstr_eval(lowercase__\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= n[cur_index : cur_index + 1_3]\r\n\t\t\t\t\t\t\t\t\tcur_index += 1_3\r\n\t\t\treturn largest_product\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple 
docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nfrom math import logaa\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = \"base_exp.txt\"\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :float \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0\r\n\t\t\tfor i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__\t\t)\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= list(map(lowercase__\t\t\t\t,\t\t\t\t\tline.split(\"\"\",\"\"\"\t\t)\t\t)\t\t)\r\n\t\t\t\t\t\tif x * logaa(lowercase__\t\t) > largest:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= x * logaa(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= i + 1\r\n\t\t\treturn result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(solution())\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":629,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport math_equivalence # From: git+https://github.com/hendrycks/math.git\r\n\r\nimport datasets\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= '\\\\n@article{hendrycksmath2021,\\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\\n author={Dan Hendrycks\\n and Collin Burns\\n and Saurav Kadavath\\n and Akul Arora\\n and Steven Basart\\n and Eric Tang\\n and Dawn Song\\n and Jacob Steinhardt},\\n journal={arXiv preprint arXiv:2103.03874},\\n year={2021}\\n}\\n'\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= '\\\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\\\frac{1}{2}\") and then computes accuracy.\\n'\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= R'\\nCalculates accuracy after canonicalizing inputs.\\n\\nArgs:\\n predictions: list of predictions to score. Each prediction\\n is a string that contains natural language and LaTex.\\n references: list of reference for each prediction. 
Each\\n reference is a string that contains natural language\\n and LaTex.\\nReturns:\\n accuracy: accuracy after canonicalizing inputs\\n (e.g., converting \"1/2\" to \"\\\\frac{1}{2}\")\\n\\nExamples:\\n >>> metric = datasets.load_metric(\"competition_math\")\\n >>> results = metric.compute(references=[\"\\\\frac{1}{2}\"], predictions=[\"1/2\"])\\n >>> print(results)\\n {\\'accuracy\\': 1.0}\\n'\r\n\r\n\r\n\r\n\r\n@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION ,\t\t\t_KWARGS_DESCRIPTION\t\t\t\t\t\t\t)\r\nclass _SCREAMING_SNAKE_CASE ( datasets.Metric\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn datasets.MetricInfo(\r\n\t\t\t\t\t\t description=_DESCRIPTION\t, citation=_CITATION\t, inputs_description=_KWARGS_DESCRIPTION\t, features=datasets.Features(\r\n\t\t\t\t\t\t {\r\n\t\t\t\t\t\t \"\"\"predictions\"\"\": datasets.Value(\"\"\"string\"\"\"\t),\r\n\t\t\t\t\t\t \"\"\"references\"\"\": datasets.Value(\"\"\"string\"\"\"\t),\r\n\t\t\t\t\t\t }\t)\t, homepage=\"\"\"https://github.com/hendrycks/math\"\"\"\t, codebase_urls=[\"\"\"https://github.com/hendrycks/math\"\"\"]\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 0.0\r\n\t\t\t\t\t\tfor i, j in zip(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\tn_correct += 1.0 if math_equivalence.is_equiv(__A\t, __A\t) else 0.0\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= n_correct / len(__A\t)\r\n\t\t\t\t\t\treturn {\r\n\t\t\t\t\t\t \"accuracy\": accuracy,\r\n\t\t\t\t\t\t}\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport itertools\r\nimport math\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif 1 < number < 4:\r\n\t\t\t\t\t\t# 2 and 3 are primes\r\n\t\t\t\t\t\treturn True\r\n\t\t\telif number < 2 or number % 2 == 0 or number % 3 == 0:\r\n\t\t\t\t\t\t# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t# All primes number are in format of 6k +/- 1\r\n\t\t\tfor i in range(5\t\t\t\t,\t\t\t\t\tint(math.sqrt(lowercase__\t\t) + 1\t\t)\t\t\t\t,\t\t\t\t\t6\t\t):\r\n\t\t\t\t\t\tif number % i == 0 or number % (i + 2) == 0:\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\treturn True\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Dict:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\twhile True:\r\n\t\t\t\t\t\tif is_prime(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tyield num\r\n\t\t\t\t\t\tnum += 1\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 1_0_0_0_1\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn next(itertools.islice(prime_generator()\t\t\t\t,\t\t\t\t\tnth - 1\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":630,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom 
transformers import DistilBertConfig, is_flax_available\r\nfrom transformers.testing_utils import require_flax, slow\r\n\r\nfrom ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask\r\n\r\n\r\nif is_flax_available():\r\n\t\t\t\timport jax.numpy as jnp\r\n\r\n\t\t\t\tfrom transformers.models.distilbert.modeling_flax_distilbert import (\r\n\t\t\t\t FlaxDistilBertForMaskedLM,\r\n\t\t\t\t FlaxDistilBertForMultipleChoice,\r\n\t\t\t\t FlaxDistilBertForQuestionAnswering,\r\n\t\t\t\t FlaxDistilBertForSequenceClassification,\r\n\t\t\t\t FlaxDistilBertForTokenClassification,\r\n\t\t\t\t FlaxDistilBertModel,\r\n\t\t\t\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=13\t, __A=7\t, __A=True\t, __A=True\t, __A=True\t, __A=True\t, __A=99\t, __A=32\t, __A=5\t, __A=4\t, __A=37\t, __A=\"gelu\"\t, __A=0.1\t, __A=0.1\t, __A=512\t, __A=16\t, __A=2\t, __A=0.0_2\t, __A=4\t, ) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= parent\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= batch_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= is_training\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= use_attention_mask\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= use_token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= use_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= vocab_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= hidden_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= num_hidden_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= num_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= intermediate_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= hidden_act\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= hidden_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= attention_probs_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= max_position_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= type_vocab_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= type_sequence_label_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= initializer_range\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= num_choices\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.vocab_size\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= None\r\n\t\t\t\t\t\tif self.use_attention_mask:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= random_attention_mask([self.batch_size, self.seq_length]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= DistilBertConfig(\r\n\t\t\t\t\t\t vocab_size=self.vocab_size\t, dim=self.hidden_size\t, n_layers=self.num_hidden_layers\t, n_heads=self.num_attention_heads\t, hidden_dim=self.intermediate_size\t, hidden_act=self.hidden_act\t, dropout=self.hidden_dropout_prob\t, attention_dropout=self.attention_probs_dropout_prob\t, max_position_embeddings=self.max_position_embeddings\t, initializer_range=self.initializer_range\t, tie_weights_=__A\t, )\r\n\r\n\t\t\t\t\t\treturn config, input_ids, attention_mask\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 
self.prepare_config_and_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[str] \t\t\t\t\t= config_and_inputs\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= {\"\"\"input_ids\"\"\": input_ids, \"\"\"attention_mask\"\"\": attention_mask}\r\n\t\t\t\t\t\treturn config, inputs_dict\r\n\r\n\r\n\r\n\r\n@require_flax\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t (\r\n\t\t\t FlaxDistilBertModel,\r\n\t\t\t FlaxDistilBertForMaskedLM,\r\n\t\t\t FlaxDistilBertForMultipleChoice,\r\n\t\t\t FlaxDistilBertForQuestionAnswering,\r\n\t\t\t FlaxDistilBertForSequenceClassification,\r\n\t\t\t FlaxDistilBertForTokenClassification,\r\n\t\t\t FlaxDistilBertForQuestionAnswering,\r\n\t\t\t )\r\n\t\t\t if is_flax_available()\r\n\t\t\t else ()\r\n\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= FlaxDistilBertModelTester(self\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tfor model_class_name in self.all_model_classes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model_class_name.from_pretrained(\"\"\"distilbert-base-uncased\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model(np.ones((1, 1)\t)\t)\r\n\t\t\t\t\t\t\t\t\tself.assertIsNotNone(__A\t)\r\n\r\n\r\n\r\n\r\n@require_flax\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= FlaxDistilBertModel.from_pretrained(\"\"\"distilbert-base-uncased\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= model(__A\t, attention_mask=__A\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= (1, 11, 768)\r\n\t\t\t\t\t\tself.assertEqual(output.shape\t, __A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]]\t)\r\n\r\n\t\t\t\t\t\tself.assertTrue(jnp.allclose(output[:, 1:4, 1:4]\t, __A\t, atol=1E-4\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 5_0\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [1] * (length + 1)\r\n\r\n\t\t\tfor row_length in range(3\t\t\t\t,\t\t\t\t\tlength + 1\t\t):\r\n\t\t\t\t\t\tfor block_length in range(3\t\t\t\t,\t\t\t\t\trow_length + 1\t\t):\r\n\t\t\t\t\t\t\t\t\tfor block_start in range(row_length - block_length\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tways_number[row_length] += ways_number[\r\n\t\t\t\t\t\t\t\t\t\t\t\t row_length - block_start - block_length - 1\r\n\t\t\t\t\t\t\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t\t\t\tways_number[row_length] += 1\r\n\r\n\t\t\treturn ways_number[length]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = 
}\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":631,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'A': ['B', 'C', 'E'],\r\n 'B': ['A', 'D', 'E'],\r\n 'C': ['A', 'F', 'G'],\r\n 'D': ['B'],\r\n 'E': ['A', 'B', 'D'],\r\n 'F': ['C'],\r\n 'G': ['C'],\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: dict\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Union[str, Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> list[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= set()\r\n\t\t\t# keep track of all the paths to be checked\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= [[start]]\r\n\r\n\t\t\t# return path if start is goal\r\n\t\t\tif start == goal:\r\n\t\t\t\t\t\treturn [start]\r\n\r\n\t\t\t# keeps looping until all possible paths have been checked\r\n\t\t\twhile queue:\r\n\t\t\t\t\t\t# pop the first path from the queue\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= queue.pop(0\t\t)\r\n\t\t\t\t\t\t# get the last node from the path\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= path[-1]\r\n\t\t\t\t\t\tif node not in explored:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= graph[node]\r\n\t\t\t\t\t\t\t\t\t# go through all neighbour nodes, construct a new path and\r\n\t\t\t\t\t\t\t\t\t# push it into the queue\r\n\t\t\t\t\t\t\t\t\tfor neighbour in neighbours:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= list(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tnew_path.append(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tqueue.append(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t# return path if neighbour is goal\r\n\t\t\t\t\t\t\t\t\t\t\t\tif neighbour == goal:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn new_path\r\n\r\n # mark node as explored\r\n\t\t\t\t\t\t\t\t\texplored.add(lowercase__\t\t)\r\n\r\n # in case there's no path between the 2 nodes\r\n\t\t\treturn []\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: dict\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Union[str, Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not graph or start not in graph or target not in graph:\r\n\t\t\t\t\t\treturn -1\r\n\t\t\tif start == target:\r\n\t\t\t\t\t\treturn 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [start]\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= set(lowercase__\t\t)\r\n\t\t\t# Keep tab on distances from `start` node.\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {start: 0, target: -1}\r\n\t\t\twhile queue:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= queue.pop(0\t\t)\r\n\t\t\t\t\t\tif node == target:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t\t\t\t dist[node] if dist[target] == -1 else min(dist[target]\t\t\t\t,\t\t\t\t\tdist[node]\t\t)\r\n\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\tfor adjacent in graph[node]:\r\n\t\t\t\t\t\t\t\t\tif adjacent not in visited:\r\n\t\t\t\t\t\t\t\t\t\t\t\tvisited.add(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tqueue.append(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= dist[node] + 1\r\n\t\t\treturn dist[target]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']\r\n\t\t\t\tprint(bfs_shortest_path_distance(demo_graph, 'G', 'D')) 
# returns 4\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/\r\n\r\nimport gc\r\nimport random\r\nimport tempfile\r\nimport unittest\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom PIL import Image\r\nfrom transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer\r\n\r\nfrom diffusers import (\r\n AutoencoderKL,\r\n ControlNetModel,\r\n DDIMScheduler,\r\n StableDiffusionControlNetImgaImgPipeline,\r\n UNetaDConditionModel,\r\n)\r\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel\r\nfrom diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device\r\nfrom diffusers.utils.import_utils import is_xformers_available\r\nfrom diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu\r\n\r\nfrom ..pipeline_params import (\r\n IMAGE_TO_IMAGE_IMAGE_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_PARAMS,\r\n)\r\nfrom ..test_pipelines_common import (\r\n PipelineKarrasSchedulerTesterMixin,\r\n PipelineLatentTesterMixin,\r\n PipelineTesterMixin,\r\n)\r\n\r\n\r\nenable_full_determinism()\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS.union({\"control_image\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, 
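# Deliberately tiny autoencoder config (two blocks, 4 latent channels)
# so the fast pipeline test stays CPU-friendly; released checkpoints
# are far larger.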
)\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= floats_tensor(control_image.shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] 
\t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfrozenset([]\t\t\t\t\t\t\t) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\r\n\t\t\t\t\t\tdef init_weights(__A\t):\r\n\t\t\t\t\t\t\t\t\tif isinstance(__A\t, torch.nn.Convad\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\ttorch.nn.init.normal(m.weight\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tm.bias.data.fill_(1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= MultiControlNetModel([controlneta, controlneta]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": 
text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= floats_tensor(control_image[0].shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 1_0.0\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 4\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.1\t, control_guidance_end=0.2\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=[0.1, 0.3]\t, control_guidance_end=[0.2, 0.7]\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple 
\t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.4\t, control_guidance_end=[0.5, 0.8]\t)[0]\r\n\r\n\t\t\t\t\t\t# make sure that all outputs are different\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdir:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# save_pretrained is not implemented for Multi-ControlNet\r\n\t\t\t\t\t\t\t\t\t\t\t\tpipe.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\texcept NotImplementedError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n@slow\r\n@require_torch_gpu\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tsuper().tearDown()\r\n\t\t\t\t\t\tgc.collect()\r\n\t\t\t\t\t\ttorch.cuda.empty_cache()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= ControlNetModel.from_pretrained(\"\"\"lllyasviel/sd-controlnet-canny\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= StableDiffusionControlNetImgaImgPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, safety_checker=__A\t, controlnet=__A\t)\r\n\t\t\t\t\t\tpipe.enable_model_cpu_offload()\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.Generator(device=\"\"\"cpu\"\"\"\t).manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"evil space-punk bird\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png\"\"\"\t).resize((512, 512)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png\"\"\"\t).resize((512, 512)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(\r\n\t\t\t\t\t\t __A\t, __A\t, control_image=__A\t, generator=__A\t, output_type=\"\"\"np\"\"\"\t, num_inference_steps=50\t, strength=0.6\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple 
\t\t\t\t\t= output.images[0]\r\n\r\n\t\t\t\t\t\tassert image.shape == (512, 512, 3)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= load_numpy(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy\"\"\"\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(expected_image - image\t).max() < 9E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":632,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport logging\r\nimport os\r\nimport sys\r\nfrom dataclasses import dataclass, field\r\nfrom typing import Optional\r\n\r\nfrom seqaseq_trainer import SeqaSeqTrainer\r\nfrom seqaseq_training_args import SeqaSeqTrainingArguments\r\n\r\nimport transformers\r\nfrom transformers import (\r\n AutoConfig,\r\n AutoModelForSeqaSeqLM,\r\n AutoTokenizer,\r\n HfArgumentParser,\r\n MBartTokenizer,\r\n MBartTokenizerFast,\r\n set_seed,\r\n)\r\nfrom transformers.trainer_utils import EvaluationStrategy, is_main_process\r\nfrom transformers.training_args import ParallelMode\r\nfrom utils import (\r\n SeqaSeqDataCollator,\r\n SeqaSeqDataset,\r\n assert_all_frozen,\r\n build_compute_metrics_fn,\r\n check_output_dir,\r\n freeze_embeds,\r\n freeze_params,\r\n lmap,\r\n save_json,\r\n use_task_specific_params,\r\n write_txt_file,\r\n)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.getLogger(__name__)\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass _SCREAMING_SNAKE_CASE :\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={"help": "Pretrained config name or path if not the same as model_name"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(default=A__ ,\t\t\tmetadata={"help": "Whether to freeze the encoder."}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(default=A__ ,\t\t\tmetadata={"help": "Whether to freeze the embeddings."}\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass _SCREAMING_SNAKE_CASE :\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default="summarization" ,\t\t\tmetadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=1024 ,\t\t\tmetadata={\r\n\t\t\t "help": (\r\n\t\t\t "The maximum total input sequence length after tokenization. 
Sequences longer \"\r\n\t\t\t \"than this will be truncated, sequences shorter will be padded.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=128 ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"The maximum total sequence length for target text after tokenization. Sequences longer \"\r\n\t\t\t \"than this will be truncated, sequences shorter will be padded.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=142 ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"The maximum total sequence length for validation target text after tokenization. Sequences longer \"\r\n\t\t\t \"than this will be truncated, sequences shorter will be padded. \"\r\n\t\t\t \"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used \"\r\n\t\t\t \"during ``evaluate`` and ``predict``.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=142 ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"The maximum total sequence length for test target text after tokenization. Sequences longer \"\r\n\t\t\t \"than this will be truncated, sequences shorter will be padded.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(default=-1 ,\t\t\tmetadata={\"help\": \"# training examples. -1 means use all.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(default=-1 ,\t\t\tmetadata={\"help\": \"# validation examples. -1 means use all.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(default=-1 ,\t\t\tmetadata={\"help\": \"# test examples. -1 means use all.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(default=A__ ,\t\t\tmetadata={\"help\": \"Source language id for translation.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[str] \t\t\t=\t\t\t\t\t\tfield(default=A__ ,\t\t\tmetadata={\"help\": \"Target language id for translation.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tfield(default=A__ ,\t\t\tmetadata={\"help\": \"# num_beams to use for evaluation.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"If only pad tokens should be ignored. 
This assumes that `config.pad_token_id` is defined.\"} ,\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Optional[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[Any]\t\t) -> Optional[int]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlogger.info(f\"\"\"***** {split} metrics *****\"\"\"\t\t)\r\n\t\t\tfor key in sorted(metrics.keys()\t\t):\r\n\t\t\t\t\t\tlogger.info(f\"\"\" {key} = {metrics[key]}\"\"\"\t\t)\r\n\t\t\tsave_json(lowercase__\t\t\t\t,\t\t\t\t\tos.path.join(lowercase__\t\t\t\t,\t\t\t\t\tf\"\"\"{split}_results.json\"\"\"\t\t)\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Tuple:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments)\t\t)\r\n\r\n\t\t\tif len(sys.argv\t\t) == 2 and sys.argv[1].endswith(\"\"\".json\"\"\"\t\t):\r\n\t\t\t\t\t\t# If we pass only one argument to the script and it's the path to a json file,\r\n\t\t\t\t\t\t# let's parse it to get our arguments.\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]\t\t)\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= parser.parse_args_into_dataclasses()\r\n\r\n\t\t\tcheck_output_dir(lowercase__\t\t)\r\n\r\n\t\t\t# Setup logging\r\n\t\t\tlogging.basicConfig(\r\n\t\t\t format=\"\"\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\"\"\t\t\t\t,\t\t\t\t\tdatefmt=\"\"\"%m/%d/%Y %H:%M:%S\"\"\"\t\t\t\t,\t\t\t\t\tlevel=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tlogger.warning(\r\n\t\t\t \"\"\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\"\"\"\t\t\t\t,\t\t\t\t\ttraining_args.local_rank\t\t\t\t,\t\t\t\t\ttraining_args.device\t\t\t\t,\t\t\t\t\ttraining_args.n_gpu\t\t\t\t,\t\t\t\t\tbool(training_args.parallel_mode == ParallelMode.DISTRIBUTED\t\t)\t\t\t\t,\t\t\t\t\ttraining_args.fpaa\t\t\t\t,\t\t\t\t\t)\r\n\t\t\ttransformers.utils.logging.enable_default_handler()\r\n\t\t\ttransformers.utils.logging.enable_explicit_format()\r\n\t\t\t# Set the verbosity to info of the Transformers logger (on main process only):\r\n\t\t\tif is_main_process(training_args.local_rank\t\t):\r\n\t\t\t\t\t\ttransformers.utils.logging.set_verbosity_info()\r\n\t\t\tlogger.info(\"\"\"Training/evaluation parameters %s\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Set seed\r\n\t\t\tset_seed(training_args.seed\t\t)\r\n\r\n\t\t\t# Load pretrained model and tokenizer\r\n\t\t\t#\r\n\t\t\t# Distributed training:\r\n\t\t\t# The .from_pretrained methods guarantee that only one local process can concurrently\r\n\t\t\t# download model & vocab.\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= AutoConfig.from_pretrained(\r\n\t\t\t model_args.config_name if model_args.config_name else model_args.model_name_or_path\t\t\t\t,\t\t\t\t\tcache_dir=model_args.cache_dir\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= (\"\"\"encoder_layerdrop\"\"\", \"\"\"decoder_layerdrop\"\"\", \"\"\"dropout\"\"\", \"\"\"attention_dropout\"\"\")\r\n\t\t\tfor p in extra_model_params:\r\n\t\t\t\t\t\tif 
getattr(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tassert hasattr(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t), f\"\"\"({config.__class__.__name__}) doesn't have a `{p}` attribute\"\"\"\r\n\t\t\t\t\t\t\t\t\tsetattr(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tgetattr(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= AutoTokenizer.from_pretrained(\r\n\t\t\t model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path\t\t\t\t,\t\t\t\t\tcache_dir=model_args.cache_dir\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= AutoModelForSeqaSeqLM.from_pretrained(\r\n\t\t\t model_args.model_name_or_path\t\t\t\t,\t\t\t\t\tfrom_tf=\"\"\".ckpt\"\"\" in model_args.model_name_or_path\t\t\t\t,\t\t\t\t\tconfig=lowercase__\t\t\t\t,\t\t\t\t\tcache_dir=model_args.cache_dir\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# use task specific params\r\n\t\t\tuse_task_specific_params(lowercase__\t\t\t\t,\t\t\t\t\tdata_args.task\t\t)\r\n\r\n\t\t\t# set num_beams for evaluation\r\n\t\t\tif data_args.eval_beams is None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model.config.num_beams\r\n\r\n\t\t\t# set decoder_start_token_id for MBart\r\n\t\t\tif model.config.decoder_start_token_id is None and isinstance(lowercase__\t\t\t\t,\t\t\t\t\t(MBartTokenizer, MBartTokenizerFast)\t\t):\r\n\t\t\t\t\t\tassert (\r\n\t\t\t\t\t\t data_args.tgt_lang is not None and data_args.src_lang is not None\r\n\t\t\t\t\t\t), \"mBart requires --tgt_lang and --src_lang\"\r\n\t\t\t\t\t\tif isinstance(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.lang_code_to_id[data_args.tgt_lang]\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(data_args.tgt_lang\t\t)\r\n\r\n\t\t\tif model_args.freeze_embeds:\r\n\t\t\t\t\t\tfreeze_embeds(lowercase__\t\t)\r\n\t\t\tif model_args.freeze_encoder:\r\n\t\t\t\t\t\tfreeze_params(model.get_encoder()\t\t)\r\n\t\t\t\t\t\tassert_all_frozen(model.get_encoder()\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= SeqaSeqDataset\r\n\r\n\t\t\t# Get datasets\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= (\r\n\t\t\t dataset_class(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\ttype_path=\"\"\"train\"\"\"\t\t\t\t,\t\t\t\t\tdata_dir=data_args.data_dir\t\t\t\t,\t\t\t\t\tn_obs=data_args.n_train\t\t\t\t,\t\t\t\t\tmax_target_length=data_args.max_target_length\t\t\t\t,\t\t\t\t\tmax_source_length=data_args.max_source_length\t\t\t\t,\t\t\t\t\tprefix=model.config.prefix or \"\"\"\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t if training_args.do_train\r\n\t\t\t else None\r\n\t\t\t)\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= (\r\n\t\t\t dataset_class(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\ttype_path=\"\"\"val\"\"\"\t\t\t\t,\t\t\t\t\tdata_dir=data_args.data_dir\t\t\t\t,\t\t\t\t\tn_obs=data_args.n_val\t\t\t\t,\t\t\t\t\tmax_target_length=data_args.val_max_target_length\t\t\t\t,\t\t\t\t\tmax_source_length=data_args.max_source_length\t\t\t\t,\t\t\t\t\tprefix=model.config.prefix or \"\"\"\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO\r\n\t\t\t else None\r\n\t\t\t)\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (\r\n\t\t\t dataset_class(\r\n\t\t\t 
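# Test split: mirrors the val split above but swaps in
# test_max_target_length and is only built when --do_predict is passed.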
lowercase__\t\t\t\t,\t\t\t\t\ttype_path=\"\"\"test\"\"\"\t\t\t\t,\t\t\t\t\tdata_dir=data_args.data_dir\t\t\t\t,\t\t\t\t\tn_obs=data_args.n_test\t\t\t\t,\t\t\t\t\tmax_target_length=data_args.test_max_target_length\t\t\t\t,\t\t\t\t\tmax_source_length=data_args.max_source_length\t\t\t\t,\t\t\t\t\tprefix=model.config.prefix or \"\"\"\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t if training_args.do_predict\r\n\t\t\t else None\r\n\t\t\t)\r\n\r\n\t\t\t# Initialize our Trainer\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (\r\n\t\t\t build_compute_metrics_fn(data_args.task\t\t\t\t,\t\t\t\t\tlowercase__\t\t) if training_args.predict_with_generate else None\r\n\t\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= SeqaSeqTrainer(\r\n\t\t\t model=lowercase__\t\t\t\t,\t\t\t\t\targs=lowercase__\t\t\t\t,\t\t\t\t\tdata_args=lowercase__\t\t\t\t,\t\t\t\t\ttrain_dataset=lowercase__\t\t\t\t,\t\t\t\t\teval_dataset=lowercase__\t\t\t\t,\t\t\t\t\tdata_collator=SeqaSeqDataCollator(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tmodel.config.decoder_start_token_id\t\t\t\t,\t\t\t\t\ttraining_args.tpu_num_cores\t\t)\t\t\t\t,\t\t\t\t\tcompute_metrics=lowercase__\t\t\t\t,\t\t\t\t\ttokenizer=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {}\r\n\t\t\t# Training\r\n\t\t\tif training_args.do_train:\r\n\t\t\t\t\t\tlogger.info(\"\"\"*** Train ***\"\"\"\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= trainer.train(\r\n\t\t\t\t\t\t model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path\t\t) else None\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= train_result.metrics\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= data_args.n_train\r\n\r\n\t\t\t\t\t\ttrainer.save_model() # this also saves the tokenizer\r\n\r\n\t\t\t\t\t\tif trainer.is_world_process_zero():\r\n\t\t\t\t\t\t\t\t\thandle_metrics(\"\"\"train\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\ttraining_args.output_dir\t\t)\r\n\t\t\t\t\t\t\t\t\tall_metrics.update(lowercase__\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Need to save the state, since Trainer.save_model saves only the tokenizer with the model\r\n\t\t\t\t\t\t\t\t\ttrainer.state.save_to_json(os.path.join(training_args.output_dir\t\t\t\t,\t\t\t\t\t\"\"\"trainer_state.json\"\"\"\t\t)\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# For convenience, we also re-save the tokenizer to the same directory,\r\n\t\t\t\t\t\t\t\t\t# so that you can share your model easily on huggingface.co/models =)\r\n\t\t\t\t\t\t\t\t\ttokenizer.save_pretrained(training_args.output_dir\t\t)\r\n\r\n # Evaluation\r\n\t\t\tif training_args.do_eval:\r\n\t\t\t\t\t\tlogger.info(\"\"\"*** Evaluate ***\"\"\"\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= trainer.evaluate(metric_key_prefix=\"\"\"val\"\"\"\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= data_args.n_val\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= round(metrics[\"\"\"val_loss\"\"\"]\t\t\t\t,\t\t\t\t\t4\t\t)\r\n\r\n\t\t\t\t\t\tif trainer.is_world_process_zero():\r\n\t\t\t\t\t\t\t\t\thandle_metrics(\"\"\"val\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\ttraining_args.output_dir\t\t)\r\n\t\t\t\t\t\t\t\t\tall_metrics.update(lowercase__\t\t)\r\n\r\n\t\t\tif training_args.do_predict:\r\n\t\t\t\t\t\tlogger.info(\"\"\"*** Predict ***\"\"\"\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= trainer.predict(test_dataset=lowercase__\t\t\t\t,\t\t\t\t\tmetric_key_prefix=\"\"\"test\"\"\"\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 
test_output.metrics\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= data_args.n_test\r\n\r\n\t\t\t\t\t\tif trainer.is_world_process_zero():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= round(metrics[\"\"\"test_loss\"\"\"]\t\t\t\t,\t\t\t\t\t4\t\t)\r\n\t\t\t\t\t\t\t\t\thandle_metrics(\"\"\"test\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\ttraining_args.output_dir\t\t)\r\n\t\t\t\t\t\t\t\t\tall_metrics.update(lowercase__\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\tif training_args.predict_with_generate:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tokenizer.batch_decode(\r\n\t\t\t\t\t\t\t\t\t\t\t\t test_output.predictions\t\t\t\t,\t\t\t\t\tskip_special_tokens=lowercase__\t\t\t\t,\t\t\t\t\tclean_up_tokenization_spaces=lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= lmap(str.strip\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\twrite_txt_file(lowercase__\t\t\t\t,\t\t\t\t\tos.path.join(training_args.output_dir\t\t\t\t,\t\t\t\t\t\"\"\"test_generations.txt\"\"\"\t\t)\t\t)\r\n\r\n\t\t\tif trainer.is_world_process_zero():\r\n\t\t\t\t\t\tsave_json(lowercase__\t\t\t\t,\t\t\t\t\tos.path.join(training_args.output_dir\t\t\t\t,\t\t\t\t\t\"\"\"all_results.json\"\"\"\t\t)\t\t)\r\n\r\n\t\t\treturn all_metrics\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Tuple\t\t) -> Dict:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tmain()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import Optional\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom transformers import GPTaConfig, GPTaLMHeadModel\r\nfrom transformers.modeling_utils import ModuleUtilsMixin\r\n\r\nfrom ...configuration_utils import ConfigMixin, register_to_config\r\nfrom ...models import ModelMixin\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t[r\"h\\.\\d+\\.attn\\.bias\", r\"h\\.\\d+\\.attn\\.masked_bias\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@register_to_config\r\n\t\t\tdef __init__( self\t, __A\t, __A\t, __A = None\t, __A = 5_0257\t, __A = 1024\t, __A = 768\t, __A = 12\t, __A = 12\t, __A = None\t, __A = \"gelu_new\"\t, __A = 0.1\t, __A = 0.1\t, __A = 0.1\t, __A = 1E-5\t, __A = 0.0_2\t, __A = True\t, __A = True\t, __A = False\t, __A = False\t, ) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tsuper().__init__()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= prefix_length\r\n\r\n\t\t\t\t\t\tif prefix_inner_dim != n_embd and prefix_hidden_dim is None:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t f\"\"\"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and\"\"\"\r\n\t\t\t\t\t\t\t\t\t f\"\"\" `n_embd`: {n_embd} are not equal.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= prefix_inner_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= prefix_hidden_dim\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t nn.Linear(self.prefix_inner_dim\t, self.prefix_hidden_dim\t)\r\n\t\t\t\t\t\t if self.prefix_hidden_dim is not None\r\n\t\t\t\t\t\t else nn.Identity()\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= (\r\n\t\t\t\t\t\t nn.Linear(self.prefix_hidden_dim\t, __A\t) if self.prefix_hidden_dim 
is not None else nn.Identity()\r\n\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= GPTaConfig(\r\n\t\t\t\t\t\t vocab_size=__A\t, n_positions=__A\t, n_embd=__A\t, n_layer=__A\t, n_head=__A\t, n_inner=__A\t, activation_function=__A\t, resid_pdrop=__A\t, embd_pdrop=__A\t, attn_pdrop=__A\t, layer_norm_epsilon=__A\t, initializer_range=__A\t, scale_attn_weights=__A\t, use_cache=__A\t, scale_attn_by_inverse_layer_idx=__A\t, reorder_and_upcast_attn=__A\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= GPTaLMHeadModel(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A = None\t, __A = None\t, ) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.transformer.transformer.wte(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.encode_prefix(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.decode_prefix(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.cat((prefix_embeds, embedding_text)\t, dim=1\t)\r\n\r\n\t\t\t\t\t\tif labels is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_dummy_token(input_ids.shape[0]\t, input_ids.device\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.cat((dummy_token, input_ids)\t, dim=1\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.transformer(inputs_embeds=__A\t, labels=__A\t, attention_mask=__A\t)\r\n\t\t\t\t\t\tif self.prefix_hidden_dim is not None:\r\n\t\t\t\t\t\t\t\t\treturn out, hidden\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\treturn out\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\ttorch.Tensor:\r\n\t\t\t\t\t\treturn torch.zeros(__A\t, self.prefix_length\t, dtype=torch.intaa\t, device=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\treturn self.encode_prefix(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@torch.no_grad()\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.split(__A\t, 1\t, dim=0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= []\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor feature in features:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.decode_prefix(feature.to(__A\t)\t) # back to the clip feature\r\n\t\t\t\t\t\t\t\t\t# Only support beam search for now\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.generate_beam(\r\n\t\t\t\t\t\t\t\t\t input_embeds=__A\t, device=__A\t, eos_token_id=__A\t)\r\n\t\t\t\t\t\t\t\t\tgenerated_tokens.append(output_tokens[0]\t)\r\n\t\t\t\t\t\t\t\t\tgenerated_seq_lengths.append(seq_lengths[0]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.stack(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= torch.stack(__A\t)\r\n\t\t\t\t\t\treturn generated_tokens, generated_seq_lengths\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@torch.no_grad()\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t, __A=None\t, __A=None\t, __A = 5\t, __A = 67\t, __A = 1.0\t, __A = None\t, ) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= eos_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= torch.ones(__A\t, device=__A\t, 
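# Beam-search bookkeeping: seq_lengths holds one running length per beam
# (initialised to 1) and is_stopped flags beams that have already emitted
# the EOS token, so their lengths stop growing below.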
dtype=torch.int\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.zeros(__A\t, device=__A\t, dtype=torch.bool\t)\r\n\r\n\t\t\t\t\t\tif input_embeds is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= input_embeds\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.transformer.transformer.wte(__A\t)\r\n\r\n\t\t\t\t\t\tfor i in range(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.transformer(inputs_embeds=__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= outputs.logits\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= logits[:, -1, :] / (temperature if temperature > 0 else 1.0)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= logits.softmax(-1\t).log()\r\n\r\n\t\t\t\t\t\t\t\t\tif scores is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= logits.topk(__A\t, -1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= generated.expand(__A\t, *generated.shape[1:]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[str] \t\t\t\t\t= next_tokens.permute(1\t, 0\t), scores.squeeze(0\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tif tokens is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= next_tokens\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokens.expand(__A\t, *tokens.shape[1:]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= torch.cat((tokens, next_tokens)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= -float(np.inf\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= scores[:, None] + logits\r\n\t\t\t\t\t\t\t\t\t\t\t\tseq_lengths[~is_stopped] += 1\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= scores_sum / seq_lengths[:, None]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scores_sum_average.view(-1\t).topk(__A\t, -1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= next_tokens // scores_sum.shape[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= seq_lengths[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= next_tokens % scores_sum.shape[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= next_tokens.unsqueeze(1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokens[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.cat((tokens, next_tokens)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= generated[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= scores_sum_average * seq_lengths\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= is_stopped[next_tokens_source]\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.transformer.transformer.wte(next_tokens.squeeze()\t).view(generated.shape[0]\t, 1\t, -1\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.cat((generated, next_token_embed)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= is_stopped + next_tokens.eq(__A\t).squeeze()\r\n\t\t\t\t\t\t\t\t\tif is_stopped.all():\r\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scores / 
seq_lengths\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= scores.argsort(descending=__A\t)\r\n\t\t\t\t\t\t# tokens tensors are already padded to max_seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= [tokens[i] for i in order]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= torch.stack(__A\t, dim=0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.tensor([seq_lengths[i] for i in order]\t, dtype=seq_lengths.dtype\t)\r\n\t\t\t\t\t\treturn output_texts, seq_lengths\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":633,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom transformers import DistilBertTokenizer, DistilBertTokenizerFast\r\nfrom transformers.testing_utils import require_tokenizers, slow\r\n\r\nfrom ..bert.test_tokenization_bert import BertTokenizationTest\r\n\r\n\r\n\r\n\r\n@require_tokenizers\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\tDistilBertTokenizer\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tDistilBertTokenizerFast\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tTrue\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= DistilBertTokenizer.from_pretrained(\"\"\"distilbert-base-uncased\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer.encode(\"\"\"sequence builders\"\"\"\t, add_special_tokens=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer.encode(\"\"\"multi-sequence build\"\"\"\t, add_special_tokens=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.build_inputs_with_special_tokens(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.build_inputs_with_special_tokens(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\tassert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]\r\n\t\t\t\t\t\tassert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [\r\n\t\t\t\t\t\t tokenizer.sep_token_id\r\n\t\t\t\t\t\t]\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport copy\r\nfrom collections import OrderedDict\r\nfrom typing import Dict, Mapping\r\n\r\nfrom packaging import version\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...onnx import OnnxConfig\r\nfrom ...utils import logging\r\nfrom ..auto import CONFIG_MAPPING\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',\r\n # See all DETR models at https://huggingface.co/models?filter=detr\r\n}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t\"detr\"\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t[\"past_key_values\"]\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t{\r\n\t\t\t \"hidden_size\": \"d_model\",\r\n\t\t\t \"num_attention_heads\": \"encoder_attention_heads\",\r\n\t\t\t}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A=True\t, __A=None\t, __A=3\t, __A=100\t, __A=6\t, __A=2048\t, __A=8\t, __A=6\t, __A=2048\t, __A=8\t, 
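# Positional defaults here follow the DETR base recipe: 100 object
# queries, 6 encoder / 6 decoder layers, 2048-dim feed-forward blocks
# and 8 attention heads on each side.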
__A=0.0\t, __A=0.0\t, __A=True\t, __A=\"relu\"\t, __A=256\t, __A=0.1\t, __A=0.0\t, __A=0.0\t, __A=0.0_2\t, __A=1.0\t, __A=False\t, __A=\"sine\"\t, __A=\"resnet50\"\t, __A=True\t, __A=False\t, __A=1\t, __A=5\t, __A=2\t, __A=1\t, __A=1\t, __A=5\t, __A=2\t, __A=0.1\t, **__A\t, ) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tif backbone_config is not None and use_timm_backbone:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"You can't specify both `backbone_config` and `use_timm_backbone`.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tif not use_timm_backbone:\r\n\t\t\t\t\t\t\t\t\tif backbone_config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CONFIG_MAPPING[\"\"\"resnet\"\"\"](out_features=[\"\"\"stage4\"\"\"]\t)\r\n\t\t\t\t\t\t\t\t\telif isinstance(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= backbone_config.get(\"\"\"model_type\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CONFIG_MAPPING[backbone_model_type]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= config_class.from_dict(__A\t)\r\n\t\t\t\t\t\t\t\t\t# set timm attributes to None\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= None, None, None\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= use_timm_backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= backbone_config\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= num_channels\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= num_queries\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= d_model\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= encoder_ffn_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= encoder_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= decoder_ffn_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= decoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= decoder_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= attention_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= activation_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= activation_function\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= init_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= init_xavier_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= encoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= decoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= auxiliary_loss\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= position_embedding_type\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= use_pretrained_backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= dilation\r\n\t\t\t\t\t\t# Hungarian matcher\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= class_cost\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= bbox_cost\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= giou_cost\r\n\t\t\t\t\t\t# Loss coefficients\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= mask_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 
dice_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= bbox_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= giou_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= eos_coefficient\r\n\t\t\t\t\t\tsuper().__init__(is_encoder_decoder=__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self.encoder_attention_heads\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self.d_model\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@classmethod\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( cls\t, __A\t, **__A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn cls(backbone_config=__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict[str, any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= copy.deepcopy(self.__dict__\t)\r\n\t\t\t\t\t\tif output[\"backbone_config\"] is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.backbone_config.to_dict()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.__class__.model_type\r\n\t\t\t\t\t\treturn output\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\tversion.parse(\"1.11\"\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tMapping[str, Mapping[int, str]]:\r\n\t\t\t\t\t\treturn OrderedDict(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t (\"\"\"pixel_values\"\"\", {0: \"\"\"batch\"\"\", 1: \"\"\"num_channels\"\"\", 2: \"\"\"height\"\"\", 3: \"\"\"width\"\"\"}),\r\n\t\t\t\t\t\t (\"\"\"pixel_mask\"\"\", {0: \"\"\"batch\"\"\"}),\r\n\t\t\t\t\t\t ]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tfloat:\r\n\t\t\t\t\t\treturn 1E-5\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn 12\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":634,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/\r\n\r\nimport gc\r\nimport random\r\nimport tempfile\r\nimport unittest\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom PIL import Image\r\nfrom transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer\r\n\r\nfrom diffusers import (\r\n AutoencoderKL,\r\n ControlNetModel,\r\n DDIMScheduler,\r\n StableDiffusionControlNetImgaImgPipeline,\r\n UNetaDConditionModel,\r\n)\r\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel\r\nfrom diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device\r\nfrom diffusers.utils.import_utils import is_xformers_available\r\nfrom diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu\r\n\r\nfrom ..pipeline_params import (\r\n IMAGE_TO_IMAGE_IMAGE_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_PARAMS,\r\n)\r\nfrom ..test_pipelines_common import (\r\n PipelineKarrasSchedulerTesterMixin,\r\n PipelineLatentTesterMixin,\r\n 
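# Shared test mixins: generic latent-handling, Karras-scheduler and
# save/load/batching checks layered onto the pipeline-specific tests
# defined below.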
PipelineTesterMixin,\r\n)\r\n\r\n\r\nenable_full_determinism()\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS.union({\"control_image\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 
torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= floats_tensor(control_image.shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfrozenset([]\t\t\t\t\t\t\t) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\r\n\t\t\t\t\t\tdef init_weights(__A\t):\r\n\t\t\t\t\t\t\t\t\tif isinstance(__A\t, torch.nn.Convad\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\ttorch.nn.init.normal(m.weight\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tm.bias.data.fill_(1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, 
conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= MultiControlNetModel([controlneta, controlneta]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= floats_tensor(control_image[0].shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 
Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 1_0.0\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 4\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.1\t, control_guidance_end=0.2\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=[0.1, 0.3]\t, control_guidance_end=[0.2, 0.7]\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.4\t, control_guidance_end=[0.5, 0.8]\t)[0]\r\n\r\n\t\t\t\t\t\t# make sure that all outputs are different\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 
self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdir:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# save_pretrained is not implemented for Multi-ControlNet\r\n\t\t\t\t\t\t\t\t\t\t\t\tpipe.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\texcept NotImplementedError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n@slow\r\n@require_torch_gpu\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tsuper().tearDown()\r\n\t\t\t\t\t\tgc.collect()\r\n\t\t\t\t\t\ttorch.cuda.empty_cache()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= ControlNetModel.from_pretrained(\"\"\"lllyasviel/sd-controlnet-canny\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= StableDiffusionControlNetImgaImgPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, safety_checker=__A\t, controlnet=__A\t)\r\n\t\t\t\t\t\tpipe.enable_model_cpu_offload()\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.Generator(device=\"\"\"cpu\"\"\"\t).manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"evil space-punk bird\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png\"\"\"\t).resize((512, 512)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png\"\"\"\t).resize((512, 512)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(\r\n\t\t\t\t\t\t __A\t, __A\t, control_image=__A\t, generator=__A\t, output_type=\"\"\"np\"\"\"\t, num_inference_steps=50\t, strength=0.6\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= output.images[0]\r\n\r\n\t\t\t\t\t\tassert image.shape == (512, 512, 3)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= load_numpy(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy\"\"\"\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(expected_image - image\t).max() < 9E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import (\r\n OptionalDependencyNotAvailable,\r\n _LazyModule,\r\n is_tf_available,\r\n is_torch_available,\r\n is_vision_available,\r\n)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}\r\n\r\ntry:\r\n\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['DeiTFeatureExtractor']\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['DeiTImageProcessor']\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept 
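# The slow test that ends above accepts the generated image when the largest
# per-pixel deviation from the reference array stays under 9e-2. The same
# check, pulled out as a tiny self-contained helper (threshold copied from the
# test; the arrays here are synthetic):
import numpy as np


def images_close(a: np.ndarray, b: np.ndarray, atol: float = 9e-2) -> bool:
    """True when no pixel differs by more than `atol` (images scaled to [0, 1])."""
    return float(np.abs(a.astype(np.float64) - b.astype(np.float64)).max()) <= atol


reference = np.random.RandomState(0).rand(512, 512, 3)
assert images_close(reference, reference + 0.05)     # within tolerance
assert not images_close(reference, reference + 0.5)  # clearly too far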
OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'DeiTForImageClassification',\r\n\t\t\t\t 'DeiTForImageClassificationWithTeacher',\r\n\t\t\t\t 'DeiTForMaskedImageModeling',\r\n\t\t\t\t 'DeiTModel',\r\n\t\t\t\t 'DeiTPreTrainedModel',\r\n\t\t\t\t]\r\n\r\ntry:\r\n\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'TFDeiTForImageClassification',\r\n\t\t\t\t 'TFDeiTForImageClassificationWithTeacher',\r\n\t\t\t\t 'TFDeiTForMaskedImageModeling',\r\n\t\t\t\t 'TFDeiTModel',\r\n\t\t\t\t 'TFDeiTPreTrainedModel',\r\n\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .feature_extraction_deit import DeiTFeatureExtractor\r\n\t\t\t\t\t\t\t\tfrom .image_processing_deit import DeiTImageProcessor\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_deit import (\r\n\t\t\t\t\t\t\t\t DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t DeiTForImageClassification,\r\n\t\t\t\t\t\t\t\t DeiTForImageClassificationWithTeacher,\r\n\t\t\t\t\t\t\t\t DeiTForMaskedImageModeling,\r\n\t\t\t\t\t\t\t\t DeiTModel,\r\n\t\t\t\t\t\t\t\t DeiTPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_tf_deit import (\r\n\t\t\t\t\t\t\t\t TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t TFDeiTForImageClassification,\r\n\t\t\t\t\t\t\t\t TFDeiTForImageClassificationWithTeacher,\r\n\t\t\t\t\t\t\t\t TFDeiTForMaskedImageModeling,\r\n\t\t\t\t\t\t\t\t TFDeiTModel,\r\n\t\t\t\t\t\t\t\t TFDeiTPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":635,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nfrom typing import Optional, Tuple\r\n\r\nfrom ...tokenization_utils import PreTrainedTokenizer\r\nfrom ...utils import logging\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'vocab_file': 'vocab.json'}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'vocab_file': {\r\n 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',\r\n }\r\n}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'mgp-str': 27}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( 
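# The DeiT init file above registers every public name in `_import_structure`
# and lets `_LazyModule` resolve it on first access, so importing the package
# stays cheap. A simplified stand-in for that behaviour using the module-level
# `__getattr__` hook from PEP 562 (the toy structure below is illustrative,
# not the real transformers machinery):
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {
    attr: mod for mod, attrs in _import_structure.items() for attr in attrs
}


def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(name)
    # the heavy import happens only now, on first attribute access
    return getattr(importlib.import_module(module_name), name)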
A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\tVOCAB_FILES_NAMES\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tPRETRAINED_VOCAB_FILES_MAP\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\tPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=\"[GO]\"\t, __A=\"[GO]\"\t, __A=\"[s]\"\t, __A=\"[GO]\"\t, **__A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tsuper().__init__(\r\n\t\t\t\t\t\t unk_token=__A\t, bos_token=__A\t, eos_token=__A\t, pad_token=__A\t, **__A\t, )\r\n\r\n\t\t\t\t\t\twith open(__A\t, encoding=\"\"\"utf-8\"\"\"\t) as vocab_handle:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= json.load(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {v: k for k, v in self.vocab.items()}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\treturn len(self.vocab\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn dict(self.vocab\t, **self.added_tokens_encoder\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor s in text:\r\n\t\t\t\t\t\t\t\t\tchar_tokens.extend(__A\t)\r\n\t\t\t\t\t\treturn char_tokens\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\treturn self.vocab.get(__A\t, self.vocab.get(self.unk_token\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn self.decoder.get(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = None\t) ->\t\t\t\t\tTuple[str]:\r\n\t\t\t\t\t\tif not os.path.isdir(__A\t):\r\n\t\t\t\t\t\t\t\t\tlogger.error(\"\"\"Vocabulary path ({}) should be a directory\"\"\".format(__A\t)\t)\r\n\t\t\t\t\t\t\t\t\treturn\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= os.path.join(\r\n\t\t\t\t\t\t __A\t, (filename_prefix + \"\"\"-\"\"\" if filename_prefix else \"\"\"\"\"\") + VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\r\n\t\t\t\t\t\twith open(__A\t, \"\"\"w\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as f:\r\n\t\t\t\t\t\t\t\t\tf.write(json.dumps(self.vocab\t, indent=2\t, sort_keys=__A\t, ensure_ascii=__A\t) + \"\"\"\\n\"\"\"\t)\r\n\r\n\t\t\t\t\t\treturn (vocab_file,)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'configuration_squeezebert': [\r\n 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',\r\n 'SqueezeBertConfig',\r\n 'SqueezeBertOnnxConfig',\r\n ],\r\n 'tokenization_squeezebert': ['SqueezeBertTokenizer'],\r\n}\r\n\r\ntry:\r\n\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['SqueezeBertTokenizerFast']\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept 
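# The tokenizer that closes above is purely character-level: `_tokenize`
# splits a string into single characters and `_convert_token_to_id` looks each
# one up in the vocab, falling back to the unknown token ("[GO]"). A minimal
# round trip of that logic with a made-up vocabulary:
toy_vocab = {"[GO]": 0, "a": 1, "b": 2, "c": 3}
toy_decoder = {v: k for k, v in toy_vocab.items()}
unk_id = toy_vocab["[GO]"]

ids = [toy_vocab.get(ch, unk_id) for ch in "abcx"]  # 'x' is out of vocabulary
assert ids == [1, 2, 3, 0]
assert "".join(toy_decoder[i] for i in ids) == "abc[GO]"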
OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'SqueezeBertForMaskedLM',\r\n\t\t\t\t 'SqueezeBertForMultipleChoice',\r\n\t\t\t\t 'SqueezeBertForQuestionAnswering',\r\n\t\t\t\t 'SqueezeBertForSequenceClassification',\r\n\t\t\t\t 'SqueezeBertForTokenClassification',\r\n\t\t\t\t 'SqueezeBertModel',\r\n\t\t\t\t 'SqueezeBertModule',\r\n\t\t\t\t 'SqueezeBertPreTrainedModel',\r\n\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom .configuration_squeezebert import (\r\n\t\t\t\t SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\r\n\t\t\t\t SqueezeBertConfig,\r\n\t\t\t\t SqueezeBertOnnxConfig,\r\n\t\t\t\t)\r\n\t\t\t\tfrom .tokenization_squeezebert import SqueezeBertTokenizer\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .tokenization_squeezebert_fast import SqueezeBertTokenizerFast\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_squeezebert import (\r\n\t\t\t\t\t\t\t\t SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t SqueezeBertForMaskedLM,\r\n\t\t\t\t\t\t\t\t SqueezeBertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t SqueezeBertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t SqueezeBertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t SqueezeBertForTokenClassification,\r\n\t\t\t\t\t\t\t\t SqueezeBertModel,\r\n\t\t\t\t\t\t\t\t SqueezeBertModule,\r\n\t\t\t\t\t\t\t\t SqueezeBertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":636,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn int((input_a, input_a).count(0\t\t) == 0\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> None:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tassert and_gate(0\t\t\t\t,\t\t\t\t\t0\t\t) == 0\r\n\t\t\tassert and_gate(0\t\t\t\t,\t\t\t\t\t1\t\t) == 0\r\n\t\t\tassert and_gate(1\t\t\t\t,\t\t\t\t\t0\t\t) == 0\r\n\t\t\tassert and_gate(1\t\t\t\t,\t\t\t\t\t1\t\t) == 1\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\ttest_and_gate()\r\n\t\t\t\tprint(and_gate(1, 0))\r\n\t\t\t\tprint(and_gate(0, 0))\r\n\t\t\t\tprint(and_gate(0, 1))\r\n\t\t\t\tprint(and_gate(1, 1))\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 2_56\r\n# Modulus to hash a string\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1_00_00_03\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple 
docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= len(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= len(lowercase__\t\t)\r\n\t\t\tif p_len > t_len:\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 1\r\n\r\n\t\t\t# Calculating the hash of pattern and substring of text\r\n\t\t\tfor i in range(lowercase__\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= (ord(pattern[i]\t\t) + p_hash * alphabet_size) % modulus\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (ord(text[i]\t\t) + text_hash * alphabet_size) % modulus\r\n\t\t\t\t\t\tif i == p_len - 1:\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= (modulus_power * alphabet_size) % modulus\r\n\r\n\t\t\tfor i in range(0\t\t\t\t,\t\t\t\t\tt_len - p_len + 1\t\t):\r\n\t\t\t\t\t\tif text_hash == p_hash and text[i : i + p_len] == pattern:\r\n\t\t\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\t\tif i == t_len - p_len:\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t# Calculate the https://en.wikipedia.org/wiki/Rolling_hash\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (\r\n\t\t\t\t\t\t (text_hash - ord(text[i]\t\t) * modulus_power) * alphabet_size\r\n\t\t\t\t\t\t + ord(text[i + p_len]\t\t)\r\n\t\t\t\t\t\t) % modulus\r\n\t\t\treturn False\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> None:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"abc1abc12\"\"\"\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"alskfjaldsabc1abc1abc12k23adsfabcabc\"\"\"\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"alskfjaldsk23adsfabcabc\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t) and not rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 2)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"ABABX\"\"\"\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"ABABZABABYABABX\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 3)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"AAAB\"\"\"\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= \"\"\"ABAAAAAB\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 4)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"abcdabcy\"\"\"\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"abcxabcdabxabcdabcdabcy\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 5)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lรผ\"\"\"\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lรผsai\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lue\"\"\"\r\n\t\t\tassert not rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\tprint(\"\"\"Success.\"\"\"\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\ttest_rabin_karp()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":637,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: float\t\t) -> float:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn 1_0 - x * 
x\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t) -> float:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif equation(lowercase__\t\t) * equation(lowercase__\t\t) >= 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Wrong space!\"\"\"\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= a\r\n\t\t\twhile (b - a) >= 0.01:\r\n\t\t\t\t\t\t# Find middle point\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= (a + b) / 2\r\n\t\t\t\t\t\t# Check if middle point is root\r\n\t\t\t\t\t\tif equation(lowercase__\t\t) == 0.0:\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t# Decide the side to repeat the steps\r\n\t\t\t\t\t\tif equation(lowercase__\t\t) * equation(lowercase__\t\t) < 0:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= c\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= c\r\n\t\t\treturn c\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\t\t\t\tprint(bisection(-2, 5))\r\n\t\t\t\tprint(bisection(0, 6))\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\nimport os\r\n\r\nimport evaluate\r\nimport torch\r\nfrom datasets import load_dataset\r\nfrom torch.optim import AdamW\r\nfrom torch.utils.data import DataLoader\r\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\r\n\r\nfrom accelerate import Accelerator, DistributedType\r\nfrom accelerate.local_sgd import LocalSGD\r\n\r\n\r\n########################################################################\r\n# This is a fully working simple example to use Accelerate\r\n# with LocalSGD, which is a method to synchronize model\r\n# parameters every K batches. 
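# A sketch of that schedule in isolation (step counts illustrative): with
# local_sgd_steps = K, parameters are averaged across workers only on every
# K-th optimizer step, e.g.:
def local_sgd_sync_steps(total_steps: int, local_sgd_steps: int) -> list:
    """Optimizer steps at which parameters would be averaged across workers."""
    return [s for s in range(1, total_steps + 1) if s % local_sgd_steps == 0]

assert local_sgd_sync_steps(8, 4) == [4, 8]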
It is different, but complementary\r\n# to gradient accumulation.\r\n#\r\n# This example trains a Bert base model on GLUE MRPC\r\n# in any of the following settings (with the same script):\r\n# - single CPU or single GPU\r\n# - multi GPUS (using PyTorch distributed mode)\r\n# - (multi) TPUs\r\n# - fp16 (mixed-precision) or fp32 (normal precision)\r\n#\r\n# To run it in each of these various modes, follow the instructions\r\n# in the readme for examples:\r\n# https://github.com/huggingface/accelerate/tree/main/examples\r\n#\r\n########################################################################\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 16\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 32\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Accelerator\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 1_6\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoTokenizer.from_pretrained(\"\"\"bert-base-cased\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= load_dataset(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tdef tokenize_function(lowercase__\t\t: int\t\t):\r\n\t\t\t\t\t\t# max_length=None => use the model max length (it's actually the default)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tokenizer(examples[\"\"\"sentence1\"\"\"]\t\t\t\t,\t\t\t\t\texamples[\"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\ttruncation=lowercase__\t\t\t\t,\t\t\t\t\tmax_length=lowercase__\t\t)\r\n\t\t\t\t\t\treturn outputs\r\n\r\n\t\t\t# Apply the method we just defined to all the examples in all the splits of the dataset\r\n\t\t\t# starting with the main process first:\r\n\t\t\twith accelerator.main_process_first():\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= datasets.map(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tbatched=lowercase__\t\t\t\t,\t\t\t\t\tremove_columns=[\"\"\"idx\"\"\", \"\"\"sentence1\"\"\", \"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\r\n\t\t\t# transformers library\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenized_datasets.rename_column(\"\"\"label\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"labels\"\"\"\t\t)\r\n\r\n\t\t\tdef collate_fn(lowercase__\t\t: Dict\t\t):\r\n\t\t\t\t\t\t# On TPU it's best to pad everything to the same length or training will be very slow.\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None\r\n\t\t\t\t\t\t# When using mixed precision we want round multiples of 8/16\r\n\t\t\t\t\t\tif accelerator.mixed_precision == \"fp8\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 1_6\r\n\t\t\t\t\t\telif accelerator.mixed_precision != \"no\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 8\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= None\r\n\r\n\t\t\t\t\t\treturn tokenizer.pad(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"longest\"\"\"\t\t\t\t,\t\t\t\t\tmax_length=lowercase__\t\t\t\t,\t\t\t\t\tpad_to_multiple_of=lowercase__\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Instantiate dataloaders.\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"train\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ 
:List[Any] \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"validation\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\r\n\t\t\treturn train_dataloader, eval_dataloader\r\n\r\n\r\n# For testing only\r\nif os.environ.get('TESTING_MOCKED_DATALOADERS', None) == \"1\":\r\n\t\t\t\tfrom accelerate.test_utils.training import mocked_dataloaders\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= mocked_dataloaders # noqa: F811\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[int]\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif os.environ.get(\"\"\"TESTING_MOCKED_DATALOADERS\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t) == \"1\":\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 2\r\n\t\t\t# New Code #\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= int(args.gradient_accumulation_steps\t\t)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= int(args.local_sgd_steps\t\t)\r\n\t\t\t# Initialize accelerator\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= Accelerator(\r\n\t\t\t cpu=args.cpu\t\t\t\t,\t\t\t\t\tmixed_precision=args.mixed_precision\t\t\t\t,\t\t\t\t\tgradient_accumulation_steps=lowercase__\t\t)\r\n\t\t\tif accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\r\n\t\t\t\t\t\traise NotImplementedError(\"\"\"LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)\"\"\"\t\t)\r\n\t\t\t# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config[\"\"\"lr\"\"\"]\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"num_epochs\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= int(config[\"\"\"seed\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"batch_size\"\"\"]\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= evaluate.load(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tset_seed(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= get_dataloaders(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t# Instantiate the model (we build the model here so that the seed also control new weights initialization)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoModelForSequenceClassification.from_pretrained(\"\"\"bert-base-cased\"\"\"\t\t\t\t,\t\t\t\t\treturn_dict=lowercase__\t\t)\r\n\r\n\t\t\t# We could avoid this line since the accelerator is set with `device_placement=True` (default value).\r\n\t\t\t# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\r\n\t\t\t# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= model.to(accelerator.device\t\t)\r\n\r\n\t\t\t# Instantiate optimizer\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= AdamW(params=model.parameters()\t\t\t\t,\t\t\t\t\tlr=lowercase__\t\t)\r\n\r\n\t\t\t# Instantiate scheduler\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= get_linear_schedule_with_warmup(\r\n\t\t\t optimizer=lowercase__\t\t\t\t,\t\t\t\t\tnum_warmup_steps=1_0_0\t\t\t\t,\t\t\t\t\tnum_training_steps=(len(lowercase__\t\t) * num_epochs)\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Prepare everything\r\n\t\t\t# There is 
no specific order to remember, we just need to unpack the objects in the same order we gave them to the\r\n\t\t\t# prepare method.\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Now we train the model\r\n\t\t\tfor epoch in range(lowercase__\t\t):\r\n\t\t\t\t\t\tmodel.train()\r\n\t\t\t\t\t\twith LocalSGD(\r\n\t\t\t\t\t\t accelerator=lowercase__\t\t\t\t,\t\t\t\t\tmodel=lowercase__\t\t\t\t,\t\t\t\t\tlocal_sgd_steps=lowercase__\t\t\t\t,\t\t\t\t\tenabled=local_sgd_steps is not None\t\t) as local_sgd:\r\n\t\t\t\t\t\t\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\t\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t# New code #\r\n\t\t\t\t\t\t\t\t\t\t\t\t# We use the new `accumulate` context manager to perform gradient accumulation\r\n\t\t\t\t\t\t\t\t\t\t\t\t# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.\r\n\t\t\t\t\t\t\t\t\t\t\t\twith accelerator.accumulate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= output.loss\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\taccelerator.backward(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlr_scheduler.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer.zero_grad()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# LocalSGD-specific line\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlocal_sgd.step()\r\n\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.logits.argmax(dim=-1\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accelerator.gather_for_metrics((predictions, batch[\"\"\"labels\"\"\"])\t\t)\r\n\t\t\t\t\t\t\t\t\tmetric.add_batch(\r\n\t\t\t\t\t\t\t\t\t predictions=lowercase__\t\t\t\t,\t\t\t\t\treferences=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= metric.compute()\r\n\t\t\t\t\t\t# Use accelerator.print to print only on the main process.\r\n\t\t\t\t\t\taccelerator.print(f\"\"\"epoch {epoch}:\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Tuple:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= argparse.ArgumentParser(description=\"\"\"Simple example of training script.\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--mixed_precision\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=lowercase__\t\t\t\t,\t\t\t\t\tchoices=[\"\"\"no\"\"\", \"\"\"fp16\"\"\", \"\"\"bf16\"\"\", \"\"\"fp8\"\"\"]\t\t\t\t,\t\t\t\t\thelp=\"\"\"Whether to use mixed precision. 
Choose\"\"\"\r\n\t\t\t \"\"\"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\"\"\r\n\t\t\t \"\"\"and an Nvidia Ampere GPU.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t# New Code #\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--gradient_accumulation_steps\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=1\t\t\t\t,\t\t\t\t\thelp=\"\"\"The number of minibatches to be ran before gradients are accumulated.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--local_sgd_steps\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=8\t\t\t\t,\t\t\t\t\thelp=\"\"\"Number of local SGD steps or None to disable local SGD\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\"\"\"--cpu\"\"\"\t\t\t\t,\t\t\t\t\taction=\"\"\"store_true\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"If passed, will train on the CPU.\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= parser.parse_args()\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= {\"\"\"lr\"\"\": 2E-5, \"\"\"num_epochs\"\"\": 3, \"\"\"seed\"\"\": 4_2, \"\"\"batch_size\"\"\": 1_6}\r\n\t\t\ttraining_function(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":638,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...utils import logging\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'caidas/swin2sr-classicalsr-x2-64': (\r\n 'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'\r\n ),\r\n}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t\"swin2sr\"\r\n\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t{\r\n\t\t\t \"hidden_size\": \"embed_dim\",\r\n\t\t\t \"num_attention_heads\": \"num_heads\",\r\n\t\t\t \"num_hidden_layers\": \"num_layers\",\r\n\t\t\t}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A=64\t, __A=1\t, __A=3\t, __A=180\t, __A=[6, 6, 6, 6, 6, 6]\t, __A=[6, 6, 6, 6, 6, 6]\t, __A=8\t, __A=2.0\t, __A=True\t, __A=0.0\t, __A=0.0\t, __A=0.1\t, __A=\"gelu\"\t, __A=False\t, __A=0.0_2\t, __A=1E-5\t, __A=2\t, __A=1.0\t, __A=\"1conv\"\t, __A=\"pixelshuffle\"\t, **__A\t, ) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tsuper().__init__(**__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= image_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= patch_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= num_channels\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= embed_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= depths\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= len(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= num_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= window_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= mlp_ratio\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= qkv_bias\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= hidden_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= attention_probs_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= drop_path_rate\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= hidden_act\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 
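# The training script above (ending at main()) is meant to be started through
# the Accelerate CLI; a typical invocation, with illustrative values for the
# script's own flags and a hypothetical file name, would be:
#
#   accelerate launch local_sgd_example.py --local_sgd_steps 8 \
#       --gradient_accumulation_steps 2 --mixed_precision fp16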
use_absolute_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= layer_norm_eps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= initializer_range\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= upscale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= img_range\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= resi_connection\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= upsampler\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\nimport json\r\nimport os\r\n\r\nimport evaluate\r\nimport torch\r\nfrom datasets import load_dataset\r\nfrom torch.optim import AdamW\r\nfrom torch.utils.data import DataLoader\r\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\r\n\r\nfrom accelerate import Accelerator, DistributedType\r\nfrom accelerate.utils.deepspeed import DummyOptim, DummyScheduler\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 16\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 32\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Accelerator\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 1_6\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str = \"bert-base-cased\"\t\t) -> Union[str, Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoTokenizer.from_pretrained(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= load_dataset(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tdef tokenize_function(lowercase__\t\t: List[str]\t\t):\r\n\t\t\t\t\t\t# max_length=None => use the model max length (it's actually the default)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer(examples[\"\"\"sentence1\"\"\"]\t\t\t\t,\t\t\t\t\texamples[\"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\ttruncation=lowercase__\t\t\t\t,\t\t\t\t\tmax_length=lowercase__\t\t)\r\n\t\t\t\t\t\treturn outputs\r\n\r\n\t\t\t# Apply the method we just defined to all the examples in all the splits of the dataset\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= datasets.map(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tbatched=lowercase__\t\t\t\t,\t\t\t\t\tremove_columns=[\"\"\"idx\"\"\", \"\"\"sentence1\"\"\", \"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\tload_from_cache_file=lowercase__\t\t)\r\n\r\n\t\t\t# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\r\n\t\t\t# transformers library\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenized_datasets.rename_column(\"\"\"label\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"labels\"\"\"\t\t)\r\n\r\n\t\t\tdef collate_fn(lowercase__\t\t: Union[str, Any]\t\t):\r\n\t\t\t\t\t\t# On TPU it's best to pad everything to the same length or training will be very slow.\r\n\t\t\t\t\t\tif accelerator.distributed_type == DistributedType.TPU:\r\n\t\t\t\t\t\t\t\t\treturn tokenizer.pad(lowercase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"max_length\"\"\"\t\t\t\t,\t\t\t\t\tmax_length=1_2_8\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t)\r\n\t\t\t\t\t\treturn tokenizer.pad(lowercase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"longest\"\"\"\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t)\r\n\r\n\t\t\t# Instantiate dataloaders.\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= DataLoader(\r\n\t\t\t 
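# The Swin2SR-style config above aliases generic names onto model-specific
# ones through `attribute_map` (e.g. `hidden_size` -> `embed_dim`). A compact
# stand-in for how a config class can honour such a map via `__getattr__`
# (simplified; the real PretrainedConfig machinery does more), reusing the
# defaults from the config above:
class MappedConfig:
    attribute_map = {"hidden_size": "embed_dim", "num_attention_heads": "num_heads"}

    def __init__(self, embed_dim: int = 180, num_heads=(6, 6, 6, 6, 6, 6)):
        self.embed_dim = embed_dim
        self.num_heads = num_heads

    def __getattr__(self, name):
        # only reached when normal attribute lookup fails
        mapped = type(self).attribute_map.get(name)
        if mapped is None:
            raise AttributeError(name)
        return getattr(self, mapped)


assert MappedConfig().hidden_size == 180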
tokenized_datasets[\"\"\"train\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"validation\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\r\n\t\t\treturn train_dataloader, eval_dataloader\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Optional[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Union[str, Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> List[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tmodel.eval()\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 0\r\n\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.logits.argmax(dim=-1\t\t)\r\n\t\t\t\t\t\t# It is slightly faster to call this once, than multiple times\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= accelerator.gather(\r\n\t\t\t\t\t\t (predictions, batch[\"\"\"labels\"\"\"])\t\t) # If we are in a multiprocess environment, the last batch has duplicates\r\n\t\t\t\t\t\tif accelerator.use_distributed:\r\n\t\t\t\t\t\t\t\t\tif step == len(lowercase__\t\t) - 1:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= predictions[: len(eval_dataloader.dataset\t\t) - samples_seen]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= references[: len(eval_dataloader.dataset\t\t) - samples_seen]\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tsamples_seen += references.shape[0]\r\n\t\t\t\t\t\tmetric.add_batch(\r\n\t\t\t\t\t\t predictions=lowercase__\t\t\t\t,\t\t\t\t\treferences=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= metric.compute()\r\n\t\t\treturn eval_metric[\"accuracy\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t) -> Any:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= Accelerator()\r\n\r\n\t\t\t# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config[\"\"\"lr\"\"\"]\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"num_epochs\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= int(config[\"\"\"seed\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"batch_size\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= args.model_name_or_path\r\n\r\n\t\t\tset_seed(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Dict \t\t\t\t\t= get_dataloaders(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Instantiate the model (we build the model here so that the seed also control new weights initialization)\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= AutoModelForSequenceClassification.from_pretrained(lowercase__\t\t\t\t,\t\t\t\t\treturn_dict=lowercase__\t\t)\r\n\r\n\t\t\t# Instantiate optimizer\r\n\t\t\tlowerCAmelCase_ 
:List[str] \t\t\t\t\t= (\r\n\t\t\t AdamW\r\n\t\t\t if accelerator.state.deepspeed_plugin is None\r\n\t\t\t or \"\"\"optimizer\"\"\" not in accelerator.state.deepspeed_plugin.deepspeed_config\r\n\t\t\t else DummyOptim\r\n\t\t\t)\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= optimizer_cls(params=model.parameters()\t\t\t\t,\t\t\t\t\tlr=lowercase__\t\t)\r\n\r\n\t\t\tif accelerator.state.deepspeed_plugin is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accelerator.state.deepspeed_plugin.deepspeed_config[\r\n\t\t\t\t\t\t \"\"\"gradient_accumulation_steps\"\"\"\r\n\t\t\t\t\t\t]\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 1\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (len(lowercase__\t\t) * num_epochs) // gradient_accumulation_steps\r\n\r\n\t\t\t# Instantiate scheduler\r\n\t\t\tif (\r\n\t\t\t accelerator.state.deepspeed_plugin is None\r\n\t\t\t or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\r\n\t\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= get_linear_schedule_with_warmup(\r\n\t\t\t\t\t\t optimizer=lowercase__\t\t\t\t,\t\t\t\t\tnum_warmup_steps=0\t\t\t\t,\t\t\t\t\tnum_training_steps=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= DummyScheduler(lowercase__\t\t\t\t,\t\t\t\t\ttotal_num_steps=lowercase__\t\t\t\t,\t\t\t\t\twarmup_num_steps=0\t\t)\r\n\r\n\t\t\t# Prepare everything\r\n\t\t\t# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\r\n\t\t\t# prepare method.\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# We need to keep track of how many total steps we have iterated over\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\t# We also need to keep track of the stating epoch so files are named properly\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= evaluate.load(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= num_epochs\r\n\r\n\t\t\tif args.partial_train_epoch is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= args.partial_train_epoch\r\n\r\n\t\t\tif args.resume_from_checkpoint:\r\n\t\t\t\t\t\taccelerator.load_state(args.resume_from_checkpoint\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= args.resume_from_checkpoint.split(\"\"\"epoch_\"\"\"\t\t)[1]\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\tfor char in epoch_string:\r\n\t\t\t\t\t\t\t\t\tif char.isdigit():\r\n\t\t\t\t\t\t\t\t\t\t\t\tstate_epoch_num += char\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(lowercase__\t\t) + 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= evaluation_loop(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\taccelerator.print(\"\"\"resumed checkpoint performance:\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\taccelerator.print(\"\"\"resumed checkpoint's scheduler's lr:\"\"\"\t\t\t\t,\t\t\t\t\tlr_scheduler.get_lr()[0]\t\t)\r\n\t\t\t\t\t\taccelerator.print(\"\"\"resumed optimizers's 
lr:\"\"\"\t\t\t\t,\t\t\t\t\toptimizer.param_groups[0][\"\"\"lr\"\"\"]\t\t)\r\n\t\t\t\t\t\twith open(os.path.join(args.output_dir\t\t\t\t,\t\t\t\t\tf\"\"\"state_{starting_epoch-1}.json\"\"\"\t\t)\t\t\t\t,\t\t\t\t\t\"\"\"r\"\"\"\t\t) as f:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= json.load(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tassert resumed_state[\"accuracy\"] == accuracy, \"Accuracy mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\tassert (\r\n\t\t\t\t\t\t\t\t\t resumed_state[\"lr\"] == lr_scheduler.get_lr()[0]\r\n\t\t\t\t\t\t\t\t\t), \"Scheduler learning rate mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\tassert (\r\n\t\t\t\t\t\t\t\t\t resumed_state[\"optimizer_lr\"] == optimizer.param_groups[0][\"lr\"]\r\n\t\t\t\t\t\t\t\t\t), \"Optimizer learning rate mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\tassert resumed_state[\"epoch\"] == starting_epoch - 1, \"Epoch mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\treturn\r\n\r\n # Now we train the model\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {}\r\n\t\t\tfor epoch in range(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t):\r\n\t\t\t\t\t\tmodel.train()\r\n\t\t\t\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= outputs.loss\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= loss / gradient_accumulation_steps\r\n\t\t\t\t\t\t\t\t\taccelerator.backward(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tif step % gradient_accumulation_steps == 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\toptimizer.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\tlr_scheduler.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\toptimizer.zero_grad()\r\n\r\n\t\t\t\t\t\t\t\t\toverall_step += 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= f\"\"\"epoch_{epoch}\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= os.path.join(args.output_dir\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\taccelerator.save_state(lowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= evaluation_loop(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accuracy\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= lr_scheduler.get_lr()[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= optimizer.param_groups[0][\"\"\"lr\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= epoch\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= overall_step\r\n\t\t\t\t\t\taccelerator.print(f\"\"\"epoch {epoch}:\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t\t\t\taccelerator.wait_for_everyone()\r\n\t\t\t\t\t\tif accelerator.is_main_process:\r\n\t\t\t\t\t\t\t\t\twith open(os.path.join(args.output_dir\t\t\t\t,\t\t\t\t\tf\"\"\"state_{epoch}.json\"\"\"\t\t)\t\t\t\t,\t\t\t\t\t\"\"\"w\"\"\"\t\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tjson.dump(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= argparse.ArgumentParser(description=\"\"\"Simple example of training script tracking peak GPU memory usage.\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--model_name_or_path\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=\"\"\"bert-base-cased\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"Path to pretrained model or model 
identifier from huggingface.co/models.\"\"\"\t\t\t\t,\t\t\t\t\trequired=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--output_dir\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=\"\"\".\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"Optional save directory where all checkpoint folders will be stored. Default is the current working directory.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--resume_from_checkpoint\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=lowercase__\t\t\t\t,\t\t\t\t\thelp=\"\"\"If the training should continue from a checkpoint folder.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--partial_train_epoch\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=lowercase__\t\t\t\t,\t\t\t\t\thelp=\"\"\"If passed, the training will stop after this number of epochs.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--num_epochs\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=2\t\t\t\t,\t\t\t\t\thelp=\"\"\"Number of train epochs.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= parser.parse_args()\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\"\"\"lr\"\"\": 2E-5, \"\"\"num_epochs\"\"\": args.num_epochs, \"\"\"seed\"\"\": 4_2, \"\"\"batch_size\"\"\": 1_6}\r\n\r\n\t\t\ttraining_function(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":639,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom multiprocessing import Lock, Pipe, Process\r\n\r\n# lock used to ensure that two processes do not access a pipe at the same time\r\n__UpperCAmelCase\t\t\t\t\t\t\t= Lock()\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Union[str, Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Union[str, Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[Any]\t\t) -> List[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tglobal process_lock\r\n\r\n\t\t\t# we perform n swaps since after n swaps we know we are sorted\r\n\t\t\t# we *could* stop early if we are sorted already, but it takes as long to\r\n\t\t\t# find out we are sorted as it does to sort the list with this algorithm\r\n\t\t\tfor i in range(0\t\t\t\t,\t\t\t\t\t1_0\t\t):\r\n\t\t\t\t\t\tif (i + position) % 2 == 0 and r_send is not None:\r\n\t\t\t\t\t\t\t\t\t# send your value to your right neighbor\r\n\t\t\t\t\t\t\t\t\tprocess_lock.acquire()\r\n\t\t\t\t\t\t\t\t\tr_send[1].send(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tprocess_lock.release()\r\n\r\n\t\t\t\t\t\t\t\t\t# receive your right neighbor's value\r\n\t\t\t\t\t\t\t\t\tprocess_lock.acquire()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= rr_cv[0].recv()\r\n\t\t\t\t\t\t\t\t\tprocess_lock.release()\r\n\r\n\t\t\t\t\t\t\t\t\t# take the lower value since you are on the left\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= min(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\telif (i + position) % 2 != 0 and l_send is not None:\r\n\t\t\t\t\t\t\t\t\t# send your value to your left 
neighbor\r\n\t\t\t\t\t\t\t\t\tprocess_lock.acquire()\r\n\t\t\t\t\t\t\t\t\tl_send[1].send(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tprocess_lock.release()\r\n\r\n\t\t\t\t\t\t\t\t\t# receive your left neighbor's value\r\n\t\t\t\t\t\t\t\t\tprocess_lock.acquire()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= lr_cv[0].recv()\r\n\t\t\t\t\t\t\t\t\tprocess_lock.release()\r\n\r\n\t\t\t\t\t\t\t\t\t# take the higher value since you are on the right\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= max(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n # after all swaps are performed, send the values back to main\r\n\t\t\tresult_pipe[1].send(lowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Optional[int]\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= []\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= []\r\n\t\t\t# initialize the list of pipes where the values will be retrieved\r\n\t\t\tfor _ in arr:\r\n\t\t\t\t\t\tresult_pipe.append(Pipe()\t\t)\r\n\t\t\t# creates the processes\r\n\t\t\t# the first and last process only have one neighbor so they are made outside\r\n\t\t\t# of the loop\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= Pipe()\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= Pipe()\r\n\t\t\tprocess_array_.append(\r\n\t\t\t Process(\r\n\t\t\t target=lowercase__\t\t\t\t,\t\t\t\t\targs=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0])\t\t\t\t,\t\t\t\t\t)\t\t)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= temp_rs\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= temp_rr\r\n\r\n\t\t\tfor i in range(1\t\t\t\t,\t\t\t\t\tlen(lowercase__\t\t) - 1\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= Pipe()\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= Pipe()\r\n\t\t\t\t\t\tprocess_array_.append(\r\n\t\t\t\t\t\t Process(\r\n\t\t\t\t\t\t target=lowercase__\t\t\t\t,\t\t\t\t\targs=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i])\t\t\t\t,\t\t\t\t\t)\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= temp_rs\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= temp_rr\r\n\r\n\t\t\tprocess_array_.append(\r\n\t\t\t Process(\r\n\t\t\t target=lowercase__\t\t\t\t,\t\t\t\t\targs=(\r\n\t\t\t len(lowercase__\t\t) - 1,\r\n\t\t\t arr[len(lowercase__\t\t) - 1],\r\n\t\t\t temp_ls,\r\n\t\t\t None,\r\n\t\t\t temp_lr,\r\n\t\t\t None,\r\n\t\t\t result_pipe[len(lowercase__\t\t) - 1],\r\n\t\t\t )\t\t\t\t,\t\t\t\t\t)\t\t)\r\n\r\n\t\t\t# start the processes\r\n\t\t\tfor p in process_array_:\r\n\t\t\t\t\t\tp.start()\r\n\r\n\t\t\t# wait for the processes to end and write their values to the list\r\n\t\t\tfor p in range(0\t\t\t\t,\t\t\t\t\tlen(lowercase__\t\t)\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= result_pipe[p][0].recv()\r\n\t\t\t\t\t\tprocess_array_[p].join()\r\n\t\t\treturn arr\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Union[str, Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= list(range(1_0\t\t\t\t,\t\t\t\t\t0\t\t\t\t,\t\t\t\t\t-1\t\t)\t\t)\r\n\t\t\tprint(\"\"\"Initial List\"\"\"\t\t)\r\n\t\t\tprint(*lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= odd_even_transposition(lowercase__\t\t)\r\n\t\t\tprint(\"\"\"Sorted List\\n\"\"\"\t\t)\r\n\t\t\tprint(*lowercase__\t\t)\r\n\r\n\r\nif __name__ == 
\"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport baseaa\r\nimport io\r\nimport json\r\nimport os\r\nfrom copy import deepcopy\r\n\r\nfrom ..optimizer import AcceleratedOptimizer\r\nfrom ..scheduler import AcceleratedScheduler\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tif isinstance(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\t# Don't modify user's data should they want to reuse it (e.g. in tests), because once we\r\n\t\t\t\t\t\t\t\t\t# modified it, it will not be accepted here again, since `auto` values would have been overridden\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= deepcopy(__A\t)\r\n\t\t\t\t\t\telif os.path.exists(__A\t):\r\n\t\t\t\t\t\t\t\t\twith io.open(__A\t, \"\"\"r\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= json.load(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= baseaa.urlsafe_baadecode(__A\t).decode(\"\"\"utf-8\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= json.loads(__A\t)\r\n\t\t\t\t\t\t\t\t\texcept (UnicodeDecodeError, AttributeError, ValueError):\r\n\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t\t\t\t f\"\"\"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= config\r\n\r\n\t\t\t\t\t\tself.set_stage_and_offload()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# zero stage - this is done as early as possible, before model is created, to allow\r\n\t\t\t\t\t\t# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object\r\n\t\t\t\t\t\t# during ``zero.Init()`` which needs to know the dtype, and some other hparams.\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_value(\"\"\"zero_optimization.stage\"\"\"\t, -1\t)\r\n\r\n\t\t\t\t\t\t# offload\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= False\r\n\t\t\t\t\t\tif self.is_zeroa() or self.is_zeroa():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= set([\"\"\"cpu\"\"\", \"\"\"nvme\"\"\"]\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= set(\r\n\t\t\t\t\t\t\t\t\t [\r\n\t\t\t\t\t\t\t\t\t self.get_value(\"\"\"zero_optimization.offload_optimizer.device\"\"\"\t),\r\n\t\t\t\t\t\t\t\t\t self.get_value(\"\"\"zero_optimization.offload_param.device\"\"\"\t),\r\n\t\t\t\t\t\t\t\t\t ]\t)\r\n\t\t\t\t\t\t\t\t\tif len(offload_devices & offload_devices_valid\t) > 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= True\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.config\r\n\r\n\t\t\t\t\t\t# find the config node of interest if it exists\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= ds_key_long.split(\"\"\".\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= nodes.pop()\r\n\t\t\t\t\t\tfor node in nodes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= config.get(__A\t)\r\n\t\t\t\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\treturn None, 
ds_key\r\n\r\n\t\t\t\t\t\treturn config, ds_key\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=None\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.find_config_node(__A\t)\r\n\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\treturn default\r\n\t\t\t\t\t\treturn config.get(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=False\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.config\r\n\r\n\t\t\t\t\t\t# find the config node of interest if it exists\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ds_key_long.split(\"\"\".\"\"\"\t)\r\n\t\t\t\t\t\tfor node in nodes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= config.get(__A\t)\r\n\t\t\t\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tif must_exist:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(f\"\"\"Can't find {ds_key_long} entry in the config: {self.config}\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn\r\n\r\n # if found remove it\r\n\t\t\t\t\t\tif parent_config is not None:\r\n\t\t\t\t\t\t\t\t\tparent_config.pop(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.get_value(__A\t)\r\n\t\t\t\t\t\treturn False if value is None else bool(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_value(__A\t)\r\n\t\t\t\t\t\treturn False if value is None else not bool(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\treturn self._stage == 2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn self._stage == 3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn self._offload\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= engine\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, **__A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# runs backpropagation and handles mixed precision\r\n\t\t\t\t\t\tself.engine.backward(__A\t, **__A\t)\r\n\r\n\t\t\t\t\t\t# Deepspeed's `engine.step` performs the following operations:\r\n\t\t\t\t\t\t# - gradient accumulation check\r\n\t\t\t\t\t\t# - gradient clipping\r\n\t\t\t\t\t\t# - optimizer step\r\n\t\t\t\t\t\t# - zero grad\r\n\t\t\t\t\t\t# - checking overflow\r\n\t\t\t\t\t\t# - lr_scheduler step (only if engine.lr_scheduler is not None)\r\n\t\t\t\t\t\tself.engine.step()\r\n\t\t\t\t\t\t# and this plugin overrides the above calls with no-ops when Accelerate runs under\r\n\t\t\t\t\t\t# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple\r\n\t\t\t\t\t\t# training loop that works transparently under many training regimes.\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tsuper().__init__(__A\t, device_placement=__A\t, 
scaler=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= hasattr(self.optimizer\t, \"\"\"overflow\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tif self.__has_overflow__:\r\n\t\t\t\t\t\t\t\t\treturn self.optimizer.overflow\r\n\t\t\t\t\t\treturn False\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tsuper().__init__(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=0.0_0_1\t, __A=0\t, **__A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= params\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= lr\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= weight_decay\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= kwargs\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=None\t, __A=0\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= optimizer\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= total_num_steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= warmup_num_steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= kwargs\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":640,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom collections.abc import Generator\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Generator[int, None, None]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 0, 1\r\n\t\t\twhile True:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[str] \t\t\t\t\t= b, a + b\r\n\t\t\t\t\t\tyield b\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 1_0_0_0\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 1\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= fibonacci_generator()\r\n\t\t\twhile len(str(next(lowercase__\t\t)\t\t)\t\t) < n:\r\n\t\t\t\t\t\tanswer += 1\r\n\t\t\treturn answer + 1\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(solution(int(str(input()).strip())))\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Copyright 2023 The HuggingFace Inc. team. 
All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom ..models.clipseg import CLIPSegForImageSegmentation\r\nfrom ..utils import is_vision_available, requires_backends\r\nfrom .base import PipelineTool\r\n\r\n\r\nif is_vision_available():\r\n\t\t\t\tfrom PIL import Image\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t \"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.\"\r\n\t\t\t \"It takes two arguments named `image` which should be the original image, and `label` which should be a text \"\r\n\t\t\t \"describing the elements what should be identified in the segmentation mask. The tool returns the mask.\"\r\n\t\t\t)\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t\"CIDAS/clipseg-rd64-refined\"\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\t\"image_segmenter\"\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tCLIPSegForImageSegmentation\r\n\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t[\"image\", \"text\"]\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t[\"image\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, *__A\t, **__A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\trequires_backends(self\t, [\"\"\"vision\"\"\"]\t)\r\n\t\t\t\t\t\tsuper().__init__(*__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn self.pre_processor(text=[label]\t, images=[image]\t, padding=__A\t, return_tensors=\"\"\"pt\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.model(**__A\t).logits\r\n\t\t\t\t\t\treturn logits\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.cpu().detach().numpy()\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 1\r\n\t\t\t\t\t\treturn Image.fromarray((array * 255).astype(np.uinta\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":641,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nimport time\r\nfrom dataclasses import dataclass, field\r\nfrom enum import Enum\r\nfrom typing import Dict, List, Optional, Union\r\n\r\nimport torch\r\nfrom filelock import FileLock\r\nfrom torch.utils.data import Dataset\r\n\r\nfrom ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING\r\nfrom ...tokenization_utils import PreTrainedTokenizer\r\nfrom ...utils import logging\r\nfrom ..processors.squad import SquadFeatures, 
SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())\r\n__UpperCAmelCase\t\t\t\t\t\t\t= tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass _SCREAMING_SNAKE_CASE :\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"Model type selected in the list: \" + \", \".join(A__\t\t\t\t\t\t\t)}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"The input data dir. Should contain the .json files for the SQuAD task.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=128 ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"The maximum total input sequence length after tokenization. Sequences longer \"\r\n\t\t\t \"than this will be truncated, sequences shorter will be padded.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=128 ,\t\t\tmetadata={\"help\": \"When splitting up a long document into chunks, how much stride to take between chunks.\"} ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=64 ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"The maximum number of tokens for the question. Questions longer than this will \"\r\n\t\t\t \"be truncated to this length.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=30 ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"The maximum length of an answer that can be generated. This is needed because the start \"\r\n\t\t\t \"and end predictions are not conditioned on one another.\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=A__ ,\t\t\tmetadata={\"help\": \"If true, the SQuAD examples contain some that do not have an answer.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :float \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=0.0 ,\t\t\tmetadata={\"help\": \"If null_score - best_non_null is greater than the threshold predict null.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=20 ,\t\t\tmetadata={\"help\": \"If null_score - best_non_null is greater than the threshold predict null.\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=0 ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"language id of input for language-specific xlm models (see\"\r\n\t\t\t \" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfield(default=1 ,\t\t\tmetadata={\"help\": \"multiple threads for converting example to features\"}\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\t\"train\"\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t\"dev\"\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :SquadDataTrainingArguments\r\n\t\t\tUpperCAmelCase_ :List[SquadFeatures]\r\n\t\t\tUpperCAmelCase_ :Split\r\n\t\t\tUpperCAmelCase_ 
:bool\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A\t, __A = None\t, __A = Split.train\t, __A = False\t, __A = None\t, __A = \"pt\"\t, ) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= args\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= is_language_sensitive\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()\r\n\t\t\t\t\t\tif isinstance(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= Split[mode]\r\n\t\t\t\t\t\t\t\t\texcept KeyError:\r\n\t\t\t\t\t\t\t\t\t\t\t\traise KeyError(\"\"\"mode is not a valid split name\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= mode\r\n\t\t\t\t\t\t# Load data features from cache or dataset file\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= \"\"\"v2\"\"\" if args.version_2_with_negative else \"\"\"v1\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(\r\n\t\t\t\t\t\t cache_dir if cache_dir is not None else args.data_dir\t, f\"\"\"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}\"\"\"\t, )\r\n\r\n\t\t\t\t\t\t# Make sure only the first process in distributed training processes the dataset,\r\n\t\t\t\t\t\t# and the others will use the cache.\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= cached_features_file + \"\"\".lock\"\"\"\r\n\t\t\t\t\t\twith FileLock(__A\t):\r\n\t\t\t\t\t\t\t\t\tif os.path.exists(__A\t) and not args.overwrite_cache:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= time.time()\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= torch.load(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t# Legacy cache files have only features, while new cache files\r\n\t\t\t\t\t\t\t\t\t\t\t\t# will have dataset and examples also.\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.old_features[\"\"\"features\"\"\"]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.old_features.get(\"\"\"dataset\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.old_features.get(\"\"\"examples\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\r\n\t\t\t\t\t\t\t\t\t\t\t\t f\"\"\"Loading features from cached file {cached_features_file} [took %.3f s]\"\"\"\t, time.time() - start\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tif self.dataset is None or self.examples is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.warning(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t f\"\"\"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"\"\" future run\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tif mode == Split.dev:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.processor.get_dev_examples(args.data_dir\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.processor.get_train_examples(args.data_dir\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= squad_convert_examples_to_features(\r\n\t\t\t\t\t\t\t\t\t\t\t\t examples=self.examples\t, tokenizer=__A\t, max_seq_length=args.max_seq_length\t, doc_stride=args.doc_stride\t, max_query_length=args.max_query_length\t, is_training=mode == Split.train\t, threads=args.threads\t, return_dataset=__A\t, )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 
time.time()\r\n\t\t\t\t\t\t\t\t\t\t\t\ttorch.save(\r\n\t\t\t\t\t\t\t\t\t\t\t\t {\"\"\"features\"\"\": self.features, \"\"\"dataset\"\"\": self.dataset, \"\"\"examples\"\"\": self.examples}\t, __A\t, )\r\n\t\t\t\t\t\t\t\t\t\t\t\t# ^ This seems to take a lot of time so I want to investigate why and how we can improve.\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\r\n\t\t\t\t\t\t\t\t\t\t\t\t f\"\"\"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __len__( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn len(self.features\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __getitem__( self\t, __A\t) ->\t\t\t\t\tDict[str, torch.Tensor]:\r\n\t\t\t\t\t\t# Convert to Tensors and build dataset\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.features[i]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.tensor(feature.input_ids\t, dtype=torch.long\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.tensor(feature.attention_mask\t, dtype=torch.long\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.tensor(feature.token_type_ids\t, dtype=torch.long\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.tensor(feature.cls_index\t, dtype=torch.long\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.tensor(feature.p_mask\t, dtype=torch.float\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.tensor(feature.is_impossible\t, dtype=torch.float\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"input_ids\"\"\": input_ids,\r\n\t\t\t\t\t\t \"\"\"attention_mask\"\"\": attention_mask,\r\n\t\t\t\t\t\t \"\"\"token_type_ids\"\"\": token_type_ids,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\tif self.args.model_type in [\"xlm\", \"roberta\", \"distilbert\", \"camembert\"]:\r\n\t\t\t\t\t\t\t\t\tdel inputs[\"token_type_ids\"]\r\n\r\n\t\t\t\t\t\tif self.args.model_type in [\"xlnet\", \"xlm\"]:\r\n\t\t\t\t\t\t\t\t\tinputs.update({\"\"\"cls_index\"\"\": cls_index, \"\"\"p_mask\"\"\": p_mask}\t)\r\n\t\t\t\t\t\t\t\t\tif self.args.version_2_with_negative:\r\n\t\t\t\t\t\t\t\t\t\t\t\tinputs.update({\"\"\"is_impossible\"\"\": is_impossible}\t)\r\n\t\t\t\t\t\t\t\t\tif self.is_language_sensitive:\r\n\t\t\t\t\t\t\t\t\t\t\t\tinputs.update({\"\"\"langs\"\"\": (torch.ones(input_ids.shape\t, dtype=torch.intaa\t) * self.args.lang_id)}\t)\r\n\r\n\t\t\t\t\t\tif self.mode == Split.train:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.tensor(feature.start_position\t, dtype=torch.long\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.tensor(feature.end_position\t, dtype=torch.long\t)\r\n\t\t\t\t\t\t\t\t\tinputs.update({\"\"\"start_positions\"\"\": start_positions, \"\"\"end_positions\"\"\": end_positions}\t)\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif index == number_of_items:\r\n\t\t\t\t\t\treturn 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 
knapsack(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tindex + 1\t\t)\r\n\t\t\tif weights[index] <= max_weight:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= values[index] + knapsack(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tmax_weight - weights[index]\t\t\t\t,\t\t\t\t\tindex + 1\t\t)\r\n\t\t\treturn max(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":642,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nfrom shutil import copyfile\r\nfrom typing import Any, Dict, List, Optional, Tuple\r\n\r\nimport sentencepiece as spm\r\n\r\nfrom ...tokenization_utils import PreTrainedTokenizer\r\nfrom ...utils import logging\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'vocab_file': 'spm_char.model'}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'vocab_file': {\r\n 'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',\r\n 'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',\r\n 'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',\r\n }\r\n}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'microsoft/speecht5_asr': 10_24,\r\n 'microsoft/speecht5_tts': 10_24,\r\n 'microsoft/speecht5_vc': 10_24,\r\n}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\tVOCAB_FILES_NAMES\r\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\tPRETRAINED_VOCAB_FILES_MAP\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\t[\"input_ids\", \"attention_mask\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=\"\"\t, __A=\"\"\t, __A=\"\"\t, __A=\"\"\t, __A = None\t, **__A\t, ) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= {} if sp_model_kwargs is None else sp_model_kwargs\r\n\r\n\t\t\t\t\t\tsuper().__init__(\r\n\t\t\t\t\t\t bos_token=__A\t, eos_token=__A\t, unk_token=__A\t, pad_token=__A\t, sp_model_kwargs=self.sp_model_kwargs\t, **__A\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= vocab_file\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= spm.SentencePieceProcessor(**self.sp_model_kwargs\t)\r\n\t\t\t\t\t\tself.sp_model.Load(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\treturn self.sp_model.get_piece_size()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= {self.convert_ids_to_tokens(__A\t): i for i in range(self.vocab_size\t)}\r\n\t\t\t\t\t\tvocab.update(self.added_tokens_encoder\t)\r\n\t\t\t\t\t\treturn vocab\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __getstate__( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.__dict__.copy()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= None\r\n\t\t\t\t\t\treturn 
state\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __setstate__( self\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= d\r\n\r\n\t\t\t\t\t\t# for backward compatibility\r\n\t\t\t\t\t\tif not hasattr(self\t, \"\"\"sp_model_kwargs\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= {}\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= spm.SentencePieceProcessor(**self.sp_model_kwargs\t)\r\n\t\t\t\t\t\tself.sp_model.Load(self.vocab_file\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\treturn self.sp_model.encode(__A\t, out_type=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\treturn self.sp_model.piece_to_id(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.sp_model.IdToPiece(__A\t)\r\n\t\t\t\t\t\treturn token\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= []\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\tfor token in tokens:\r\n\t\t\t\t\t\t\t\t\t# make sure that special tokens are not decoded using sentencepiece model\r\n\t\t\t\t\t\t\t\t\tif token in self.all_special_tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\tout_string += self.sp_model.decode(__A\t) + token\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= []\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tcurrent_sub_tokens.append(__A\t)\r\n\t\t\t\t\t\tout_string += self.sp_model.decode(__A\t)\r\n\t\t\t\t\t\treturn out_string.strip()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=None\t) ->\t\t\t\t\tList[int]:\r\n\t\t\t\t\t\tif token_ids_a is None:\r\n\t\t\t\t\t\t\t\t\treturn token_ids_a + [self.eos_token_id]\r\n\t\t\t\t\t\t# We don't expect to process pairs, but leave the pair logic for API consistency\r\n\t\t\t\t\t\treturn token_ids_a + token_ids_a + [self.eos_token_id]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = None\t, __A = False\t) ->\t\t\t\t\tList[int]:\r\n\t\t\t\t\t\tif already_has_special_tokens:\r\n\t\t\t\t\t\t\t\t\treturn super().get_special_tokens_mask(\r\n\t\t\t\t\t\t\t\t\t token_ids_a=__A\t, token_ids_a=__A\t, already_has_special_tokens=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [1]\r\n\t\t\t\t\t\tif token_ids_a is None:\r\n\t\t\t\t\t\t\t\t\treturn ([0] * len(__A\t)) + suffix_ones\r\n\t\t\t\t\t\treturn ([0] * len(__A\t)) + ([0] * len(__A\t)) + suffix_ones\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = None\t) ->\t\t\t\t\tTuple[str]:\r\n\t\t\t\t\t\tif not os.path.isdir(__A\t):\r\n\t\t\t\t\t\t\t\t\tlogger.error(f\"\"\"Vocabulary path ({save_directory}) should be a directory\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\treturn\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= os.path.join(\r\n\t\t\t\t\t\t __A\t, (filename_prefix + \"\"\"-\"\"\" if filename_prefix else \"\"\"\"\"\") + VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\r\n\t\t\t\t\t\tif os.path.abspath(self.vocab_file\t) != os.path.abspath(__A\t) and os.path.isfile(self.vocab_file\t):\r\n\t\t\t\t\t\t\t\t\tcopyfile(self.vocab_file\t, __A\t)\r\n\t\t\t\t\t\telif not os.path.isfile(self.vocab_file\t):\r\n\t\t\t\t\t\t\t\t\twith open(__A\t, \"\"\"wb\"\"\"\t) as 
fi:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.sp_model.serialized_model_proto()\r\n\t\t\t\t\t\t\t\t\t\t\t\tfi.write(__A\t)\r\n\r\n\t\t\t\t\t\treturn (out_vocab_file,)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom .imports import is_tqdm_available\r\n\r\n\r\nif is_tqdm_available():\r\n\t\t\t\tfrom tqdm.auto import tqdm as _tqdm\r\n\r\nfrom ..state import PartialState\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: bool = True\t\t\t\t,\t\t\t\t\t*lowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\t**lowercase__\t\t: str\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not is_tqdm_available():\r\n\t\t\t\t\t\traise ImportError(\"\"\"Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= False\r\n\t\t\tif main_process_only:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= PartialState().local_process_index == 0\r\n\t\t\treturn _tqdm(*lowercase__\t\t\t\t,\t\t\t\t\t**lowercase__\t\t\t\t,\t\t\t\t\tdisable=lowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":643,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom collections import OrderedDict\r\nfrom typing import Mapping\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...onnx import OnnxConfig\r\nfrom ...utils import logging\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',\r\n 'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',\r\n 'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',\r\n 'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',\r\n 'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',\r\n 'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',\r\n 'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',\r\n 'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',\r\n 'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',\r\n 'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',\r\n}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t\"xlm\"\r\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\t{\r\n\t\t\t \"hidden_size\": \"emb_dim\",\r\n\t\t\t \"num_attention_heads\": \"n_heads\",\r\n\t\t\t \"num_hidden_layers\": \"n_layers\",\r\n\t\t\t \"n_words\": \"vocab_size\", # For backward compatibility\r\n\t\t\t}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A=3_0145\t, __A=2048\t, __A=12\t, __A=16\t, __A=0.1\t, __A=0.1\t, __A=True\t, __A=False\t, __A=False\t, __A=False\t, __A=1\t, __A=True\t, __A=512\t, __A=2048**-0.5\t, __A=1E-12\t, __A=0.0_2\t, __A=0\t, __A=1\t, __A=2\t, __A=3\t, __A=5\t, __A=True\t, __A=\"first\"\t, __A=True\t, __A=None\t, __A=True\t, __A=0.1\t, __A=5\t, __A=5\t, 
__A=0\t, __A=0\t, __A=2\t, __A=0\t, **__A\t, ) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= vocab_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= emb_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= n_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= n_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= attention_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= gelu_activation\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= sinusoidal_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= causal\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= asm\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= n_langs\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= use_lang_emb\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= layer_norm_eps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= bos_index\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= eos_index\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= pad_index\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= unk_index\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= mask_index\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= is_encoder\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= max_position_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= embed_init_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= init_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= summary_type\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= summary_use_proj\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= summary_activation\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= summary_proj_to_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= summary_first_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= start_n_top\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= end_n_top\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= mask_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= lang_id\r\n\r\n\t\t\t\t\t\tif \"n_words\" in kwargs:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= kwargs[\"\"\"n_words\"\"\"]\r\n\r\n\t\t\t\t\t\tsuper().__init__(pad_token_id=__A\t, bos_token_id=__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tMapping[str, Mapping[int, str]]:\r\n\t\t\t\t\t\tif self.task == \"multiple-choice\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {0: \"\"\"batch\"\"\", 1: \"\"\"choice\"\"\", 2: \"\"\"sequence\"\"\"}\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {0: \"\"\"batch\"\"\", 1: \"\"\"sequence\"\"\"}\r\n\t\t\t\t\t\treturn OrderedDict(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t (\"\"\"input_ids\"\"\", dynamic_axis),\r\n\t\t\t\t\t\t (\"\"\"attention_mask\"\"\", dynamic_axis),\r\n\t\t\t\t\t\t (\"\"\"token_type_ids\"\"\", dynamic_axis),\r\n\t\t\t\t\t\t ]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport importlib\r\nimport json\r\nimport os\r\nimport sys\r\nimport tempfile\r\nimport unittest\r\nfrom pathlib import Path\r\n\r\nimport transformers\r\nimport transformers.models.auto\r\nfrom transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig\r\nfrom 
transformers.models.bert.configuration_bert import BertConfig\r\nfrom transformers.models.roberta.configuration_roberta import RobertaConfig\r\nfrom transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir\r\n\r\n\r\nsys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))\r\n\r\nfrom test_module.custom_configuration import CustomConfig # noqa E402\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= get_tests_dir('fixtures/dummy-config.json')\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself.assertIsNotNone(transformers.models.auto.__spec__\t)\r\n\t\t\t\t\t\tself.assertIsNotNone(importlib.util.find_spec(\"\"\"transformers.models.auto\"\"\"\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"bert-base-uncased\"\"\"\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= AutoConfig.for_model(\"\"\"roberta\"\"\"\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\t# This model name contains bert and roberta, but roberta ends up being picked.\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(__A\t, \"\"\"fake-roberta\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tos.makedirs(__A\t, exist_ok=__A\t)\r\n\t\t\t\t\t\t\t\t\twith open(os.path.join(__A\t, \"\"\"config.json\"\"\"\t)\t, \"\"\"w\"\"\"\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tf.write(json.dumps({}\t)\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(type(__A\t)\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"custom\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# Wrong model type will raise an error\r\n\t\t\t\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"model\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# Trying to register something existing in the Transformers library will raise an error\r\n\t\t\t\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"bert\"\"\"\t, __A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Now that the config is registered, it can be used as any other config with the auto-API\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CustomConfig()\r\n\t\t\t\t\t\t\t\t\twith tempfile.TemporaryDirectory() as 
tmp_dir:\r\n\t\t\t\t\t\t\t\t\t\t\t\tconfig.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tif \"custom\" in CONFIG_MAPPING._extra_content:\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel CONFIG_MAPPING._extra_content[\"custom\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, \"\"\"bert-base is not a local folder and is not a valid model identifier\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"bert-base\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, r\"\"\"aaaaaa is not a valid git identifier \\(branch name, tag name or commit id\\)\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= AutoConfig.from_pretrained(__A\t, revision=\"\"\"aaaaaa\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, \"\"\"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.\"\"\"\t, ):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/no-config-test-repo\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# If remote code is not set, we will time out when asking whether to load the model.\r\n\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t)\r\n\t\t\t\t\t\t# If remote code is disabled, we can't load this config.\r\n\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Test config can be reloaded.\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\tconfig.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= AutoConfig.from_pretrained(__A\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\tself.assertEqual(reloaded_config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\t\"new-model\"\r\n\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"new-model\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# If remote code is not set, the default is to use local\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfigLocal\"\"\"\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# If remote code is disabled, we 
load the local one.\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfigLocal\"\"\"\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# If remote is enabled, we load from the Hub\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tif \"new-model\" in CONFIG_MAPPING._extra_content:\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel CONFIG_MAPPING._extra_content[\"new-model\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":644,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nimport unittest\r\n\r\nfrom transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (\r\n VOCAB_FILES_NAMES,\r\n GPTSanJapaneseTokenizer,\r\n)\r\nfrom transformers.testing_utils import require_tokenizers, slow\r\n\r\nfrom ...test_tokenization_common import TokenizerTesterMixin\r\n\r\n\r\n\r\n\r\n@require_tokenizers\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tGPTSanJapaneseTokenizer\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t{\"do_clean_text\": False, \"add_prefix_space\": False}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tsuper().setUp()\r\n\r\n\t\t\t\t\t\t# fmt: off\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [\"\"\"ใ“ใ‚“\"\"\", \"\"\"ใ“ใ‚“ใซ\"\"\", \"\"\"ใซใกใฏ\"\"\", \"\"\"ใฐใ‚“ใฏ\"\"\", \"\"\"ไธ–็•Œ,ใ”บ็•Œ\"\"\", \"\"\"ใ€\"\"\", \"\"\"ใ€‚\"\"\", \"\"\"
\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"<|emoji1|>\"\"\", \"\"\"\"\"\", \"\"\"<|bagoftoken|>\"\"\", \"\"\"<|endoftext|>\"\"\"]\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\"\"\"emoji\"\"\": {\"\"\"\\ud83d\\ude00\"\"\": \"\"\"<|emoji1|>\"\"\"}, \"\"\"emoji_inv\"\"\": {\"\"\"<|emoji1|>\"\"\": \"\"\"\\ud83d\\ude00\"\"\"}} # ๐Ÿ˜€\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {\"\"\"unk_token\"\"\": \"\"\"\"\"\"}\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"emoji_file\"\"\"]\t)\r\n\t\t\t\t\t\twith open(self.vocab_file\t, \"\"\"w\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as vocab_writer:\r\n\t\t\t\t\t\t\t\t\tvocab_writer.write(\"\"\"\"\"\".join([x + \"\"\"\\n\"\"\" for x in vocab_tokens]\t)\t)\r\n\t\t\t\t\t\twith open(self.emoji_file\t, \"\"\"w\"\"\"\t) as emoji_writer:\r\n\t\t\t\t\t\t\t\t\temoji_writer.write(json.dumps(__A\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tkwargs.update(self.special_tokens_map\t)\r\n\t\t\t\t\t\treturn GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \\nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \\nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\t\t\t\t\t\treturn input_text, output_text\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_input_output_texts(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(__A\t, add_special_tokens=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.decode(__A\t, clean_up_tokenization_spaces=__A\t)\r\n\t\t\t\t\t\treturn text, ids\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\"\"\"ใ“ใ‚“\"\"\", \"\"\"ใซใกใฏ\"\"\", \"\"\"ใ€\"\"\", \"\"\"ไธ–็•Œ\"\"\", \"\"\"ใ€‚\"\"\", \"\"\"\"\"\", \"\"\"ใ“ใ‚“\"\"\", \"\"\"ใฐใ‚“ใฏ\"\"\", \"\"\"ใ€\"\"\", \"\"\"ใ”บ็•Œ\"\"\", \"\"\"ใ€‚\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.tokenize(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids without special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [0, 
2, 5, 4, 6, 8, 0, 3, 5, 4, 6]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids with special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokens + [tokenizer.unk_token]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.encode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer.encode(prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(__A\t, prefix_text=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [1] + [0] * (len_prefix + len_text + 1)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [1] * (len_prefix + len_text + 1) + [0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [1] + [1] * (len_prefix) + [0] * (len_text + 1)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(prefix_text + 
input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(__A\t, prefix_text=__A\t).token_type_ids\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"ใ‚ใƒณใ„ใƒฏ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=\"\"\"ใ‚ใƒณใ„ใƒฏ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"ใ„ใƒฏ\"\"\"\t, prefix_text=\"\"\"ใ‚ใƒณ\"\"\"\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[-1]\t) # SEG token\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[3]\t) # SEG token\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[\"\"\"ๆญฆ็”ฐไฟก็Ž„\"\"\", \"\"\"ใฏใ€\"\"\"], [\"\"\"็น”็”ฐไฟก้•ท\"\"\", \"\"\"ใฎ้…ไธ‹ใฎใ€\"\"\"]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer(__A\t, padding=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer.batch_encode_plus(__A\t, padding=__A\t)\r\n\r\n\t\t\t\t\t\t# fmt: off\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tself.assertListEqual(x_token.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.attention_mask\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.attention_mask\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# Intentionally convert some words to accommodate character fluctuations unique to Japanese\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# tokenizer has no padding token\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nimport unittest\r\n\r\nfrom transformers.models.gptsan_japanese.tokenization_gptsan_japanese 
import (\r\n VOCAB_FILES_NAMES,\r\n GPTSanJapaneseTokenizer,\r\n)\r\nfrom transformers.testing_utils import require_tokenizers, slow\r\n\r\nfrom ...test_tokenization_common import TokenizerTesterMixin\r\n\r\n\r\n\r\n\r\n@require_tokenizers\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tGPTSanJapaneseTokenizer\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t{\"do_clean_text\": False, \"add_prefix_space\": False}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tsuper().setUp()\r\n\r\n\t\t\t\t\t\t# fmt: off\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [\"\"\"ใ“ใ‚“\"\"\", \"\"\"ใ“ใ‚“ใซ\"\"\", \"\"\"ใซใกใฏ\"\"\", \"\"\"ใฐใ‚“ใฏ\"\"\", \"\"\"ไธ–็•Œ,ใ”บ็•Œ\"\"\", \"\"\"ใ€\"\"\", \"\"\"ใ€‚\"\"\", \"\"\"
\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"\"\"\", \"\"\"<|emoji1|>\"\"\", \"\"\"\"\"\", \"\"\"<|bagoftoken|>\"\"\", \"\"\"<|endoftext|>\"\"\"]\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\"\"\"emoji\"\"\": {\"\"\"\\ud83d\\ude00\"\"\": \"\"\"<|emoji1|>\"\"\"}, \"\"\"emoji_inv\"\"\": {\"\"\"<|emoji1|>\"\"\": \"\"\"\\ud83d\\ude00\"\"\"}} # ๐Ÿ˜€\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {\"\"\"unk_token\"\"\": \"\"\"\"\"\"}\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"emoji_file\"\"\"]\t)\r\n\t\t\t\t\t\twith open(self.vocab_file\t, \"\"\"w\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as vocab_writer:\r\n\t\t\t\t\t\t\t\t\tvocab_writer.write(\"\"\"\"\"\".join([x + \"\"\"\\n\"\"\" for x in vocab_tokens]\t)\t)\r\n\t\t\t\t\t\twith open(self.emoji_file\t, \"\"\"w\"\"\"\t) as emoji_writer:\r\n\t\t\t\t\t\t\t\t\temoji_writer.write(json.dumps(__A\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tkwargs.update(self.special_tokens_map\t)\r\n\t\t\t\t\t\treturn GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \\nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \\nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\t\t\t\t\t\treturn input_text, output_text\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_input_output_texts(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(__A\t, add_special_tokens=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.decode(__A\t, clean_up_tokenization_spaces=__A\t)\r\n\t\t\t\t\t\treturn text, ids\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\"\"\"ใ“ใ‚“\"\"\", \"\"\"ใซใกใฏ\"\"\", \"\"\"ใ€\"\"\", \"\"\"ไธ–็•Œ\"\"\", \"\"\"ใ€‚\"\"\", \"\"\"\"\"\", \"\"\"ใ“ใ‚“\"\"\", \"\"\"ใฐใ‚“ใฏ\"\"\", \"\"\"ใ€\"\"\", \"\"\"ใ”บ็•Œ\"\"\", \"\"\"ใ€‚\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.tokenize(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids without special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [0, 
2, 5, 4, 6, 8, 0, 3, 5, 4, 6]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids with special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokens + [tokenizer.unk_token]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.encode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer.encode(prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(__A\t, prefix_text=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [1] + [0] * (len_prefix + len_text + 1)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [1] * (len_prefix + len_text + 1) + [0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [1] + [1] * (len_prefix) + [0] * (len_text + 1)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(prefix_text + 
input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(__A\t, prefix_text=__A\t).token_type_ids\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"ใ‚ใƒณใ„ใƒฏ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=\"\"\"ใ‚ใƒณใ„ใƒฏ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"ใ„ใƒฏ\"\"\"\t, prefix_text=\"\"\"ใ‚ใƒณ\"\"\"\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[-1]\t) # SEG token\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[3]\t) # SEG token\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[\"\"\"ๆญฆ็”ฐไฟก็Ž„\"\"\", \"\"\"ใฏใ€\"\"\"], [\"\"\"็น”็”ฐไฟก้•ท\"\"\", \"\"\"ใฎ้…ไธ‹ใฎใ€\"\"\"]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer(__A\t, padding=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer.batch_encode_plus(__A\t, padding=__A\t)\r\n\r\n\t\t\t\t\t\t# fmt: off\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tself.assertListEqual(x_token.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.attention_mask\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.attention_mask\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# Intentionally convert some words to accommodate character fluctuations unique to Japanese\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# tokenizer has no padding token\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":645,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( 
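# A quick, self-contained illustration of the token_type_ids layout asserted in
# the slow test above, using made-up token counts instead of the real
# "Tanrei/GPTSAN-japanese" checkpoint (so no download is needed).
len_prefix, len_text = 4, 6  # hypothetical lengths

all_as_text = [1] + [0] * (len_prefix + len_text + 1)    # tokenizer(prefix + text)
all_as_prefix = [1] * (len_prefix + len_text + 1) + [0]  # tokenizer("", prefix_text=prefix + text)
split = [1] + [1] * len_prefix + [0] * (len_text + 1)    # tokenizer(text, prefix_text=prefix)

assert len(all_as_text) == len(all_as_prefix) == len(split)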
"""simple docstring"""


def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
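# A few spot checks for binary_or (these example values are ours, not from the
# original file): 25 = 0b11001 and 32 = 0b100000, so their OR is 0b111001.
assert binary_or(25, 32) == "0b111001"
assert binary_or(5, 3) == "0b111"
assert binary_or(0, 0) == "0b0"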
\t\t\t\t\t= load_tool(\"\"\"text-classification\"\"\"\t)\r\n\t\t\t\t\t\tself.tool.setup()\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= load_tool(\"\"\"text-classification\"\"\"\t, remote=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.tool(\"\"\"That's quite cool\"\"\"\t, [\"\"\"positive\"\"\", \"\"\"negative\"\"\"]\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, \"\"\"positive\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.remote_tool(\"\"\"That's quite cool\"\"\"\t, [\"\"\"positive\"\"\", \"\"\"negative\"\"\"]\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, \"\"\"positive\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.tool(text=\"\"\"That's quite cool\"\"\"\t, labels=[\"\"\"positive\"\"\", \"\"\"negative\"\"\"]\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, \"\"\"positive\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.remote_tool(text=\"\"\"That's quite cool\"\"\"\t, labels=[\"\"\"positive\"\"\", \"\"\"negative\"\"\"]\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, \"\"\"positive\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1.6021e-19 # units = C\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\t) -> tuple[str, float]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif (conductivity, electron_conc, mobility).count(0\t\t) != 1:\r\n\t\t\t\t\t\traise ValueError(\"\"\"You cannot supply more or less than 2 values\"\"\"\t\t)\r\n\t\t\telif conductivity < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Conductivity cannot be negative\"\"\"\t\t)\r\n\t\t\telif electron_conc < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Electron concentration cannot be negative\"\"\"\t\t)\r\n\t\t\telif mobility < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"mobility cannot be negative\"\"\"\t\t)\r\n\t\t\telif conductivity == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"conductivity\",\r\n\t\t\t\t\t\t mobility * electron_conc * ELECTRON_CHARGE,\r\n\t\t\t\t\t\t)\r\n\t\t\telif electron_conc == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"electron_conc\",\r\n\t\t\t\t\t\t conductivity / (mobility * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"mobility\",\r\n\t\t\t\t\t\t conductivity / (electron_conc * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":647,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nfrom functools import lru_cache\r\nfrom typing import List, Optional, Tuple\r\n\r\nimport regex as re\r\n\r\nfrom ...tokenization_utils import AddedToken, PreTrainedTokenizer\r\nfrom 
"""simple docstring"""

import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096-finetuned-triviaqa": "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json",
        "allenai/longformer-base-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json",
        "allenai/longformer-large-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096-finetuned-triviaqa": "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt",
        "allenai/longformer-base-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt",
        "allenai/longformer-large-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    # Map every byte to a printable unicode character so byte-level BPE never
    # needs an unknown token.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("ยก"), ord("ยฌ") + 1)) + list(range(ord("ยฎ"), ord("รฟ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (given as a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked pair first; unknown pairs rank as infinity.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
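# Round trip through the byte-to-unicode table defined above: every UTF-8 byte
# gets a printable stand-in, so byte-level BPE never needs an <unk> fallback,
# and the mapping inverts losslessly (a sketch, not part of the original file).
byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for k, v in byte_encoder.items()}

visible = "".join(byte_encoder[b] for b in "cafรฉ".encode("utf-8"))
print(visible)  # 'cafรยฉ' -- the 0xC3 0xA9 bytes of 'รฉ' rendered as printable characters
restored = bytearray(byte_decoder[c] for c in visible).decode("utf-8")
assert restored == "cafรฉ"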
"""simple docstring"""

import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
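# Migration is a pure rename: the shim above only warns and delegates, so new
# code should construct the image processor directly. The checkpoint name here
# is just a commonly used example, not something the file above prescribes.
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")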
"""simple docstring"""

import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")

            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")

            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
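# Minimal sketch of the registration flow exercised above, with a hypothetical
# config class; AutoConfig.register requires model_type to match the key.
from transformers import AutoConfig, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-model"


AutoConfig.register("my-model", MyConfig)
assert isinstance(AutoConfig.for_model("my-model"), MyConfig)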
\"\"\"s-search-result\"\"\"}\t\t\t\t,\t\t\t\t\t)\t\t\t\t,\t\t\t\t\tsoup.find_all(\"\"\"div\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-row a-size-base a-color-base\"\"\"}\t\t)\t\t\t\t,\t\t\t\t\t):\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= item.ha.text\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"https://www.amazon.in/\"\"\" + item.ha.a[\"\"\"href\"\"\"]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-offscreen\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-icon-alt\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"Not available\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"โ‚น\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t + item.find(\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-price a-text-price\"\"\"}\t\t).text.split(\"\"\"โ‚น\"\"\"\t\t)[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= float(\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t float(product_mrp.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t - float(product_price.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t )\r\n\t\t\t\t\t\t\t\t\t\t\t\t / float(product_mrp.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t )\r\n\t\t\t\t\t\t\t\t\t\t\t\t * 1_0_0\t\t)\r\n\t\t\t\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= float(\"\"\"nan\"\"\"\t\t)\r\n\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\r\n\t\t\t\t\t\t product_title,\r\n\t\t\t\t\t\t product_link,\r\n\t\t\t\t\t\t product_price,\r\n\t\t\t\t\t\t product_rating,\r\n\t\t\t\t\t\t product_mrp,\r\n\t\t\t\t\t\t discount,\r\n\t\t\t\t\t\t]\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tdata_frame.index += 1\r\n\t\t\treturn data_frame\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 'headphones'\r\n\t\t\t\tget_amazon_product_data(product).to_csv(F\"\"\"Amazon Product Data for {product}.csv\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":649,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],\r\n}\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise 
"""simple docstring"""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
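# What the else-branch above buys you, sketched: the package module becomes a
# _LazyModule proxy, and torch-backed submodules are only imported when an
# attribute is first touched.
import sys

import transformers.models.ernie as ernie

print(type(sys.modules["transformers.models.ernie"]).__name__)  # _LazyModule
model_cls = ernie.ErnieModel  # first attribute access triggers the real import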
feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tprocessor.save_pretrained(self.tmpdirname\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ClapProcessor.from_pretrained(self.tmpdirname\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.tokenizer.get_vocab()\t, tokenizer.get_vocab()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.tokenizer\t, __A\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.feature_extractor.to_json_string()\t, feature_extractor.to_json_string()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.feature_extractor\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= ClapProcessor(tokenizer=self.get_tokenizer()\t, feature_extractor=self.get_feature_extractor()\t)\r\n\t\t\t\t\t\tprocessor.save_pretrained(self.tmpdirname\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_tokenizer(bos_token=\"\"\"(BOS)\"\"\"\t, eos_token=\"\"\"(EOS)\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.get_feature_extractor(do_normalize=__A\t, padding_value=1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ClapProcessor.from_pretrained(\r\n\t\t\t\t\t\t self.tmpdirname\t, bos_token=\"\"\"(BOS)\"\"\"\t, eos_token=\"\"\"(EOS)\"\"\"\t, do_normalize=__A\t, padding_value=1.0\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.tokenizer.get_vocab()\t, tokenizer_add_kwargs.get_vocab()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.tokenizer\t, __A\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.feature_extractor.to_json_string()\t, feature_extractor_add_kwargs.to_json_string()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.feature_extractor\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= floats_list((3, 1000)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= feature_extractor(__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= processor(audios=__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\r\n\t\t\t\t\t\tfor key in input_feat_extract.keys():\r\n\t\t\t\t\t\t\t\t\tself.assertAlmostEqual(input_feat_extract[key].sum()\t, input_processor[key].sum()\t, delta=1E-2\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"This is a test string\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= processor(text=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(__A\t)\r\n\r\n\t\t\t\t\t\tfor key in encoded_tok.keys():\r\n\t\t\t\t\t\t\t\t\tself.assertListEqual(encoded_tok[key]\t, encoded_processor[key]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 
self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= processor.batch_decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer.batch_decode(__A\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(\r\n\t\t\t\t\t\t processor.model_input_names[2:]\t, feature_extractor.model_input_names\t, msg=\"\"\"`processor` and `feature_extractor` model input names do not match\"\"\"\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":650,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}\r\n__UpperCAmelCase\t\t\t\t\t\t\t= ['a', 'b', 'c', 'd', 'e']\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Any\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= start\r\n\t\t\t# add current to visited\r\n\t\t\tvisited.append(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= edges[current]\r\n\t\t\tfor neighbor in neighbors:\r\n\t\t\t\t\t\t# if neighbor not in visited, visit\r\n\t\t\t\t\t\tif neighbor not in visited:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= topological_sort(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n # if all neighbors visited add current to sort\r\n\t\t\tsort.append(lowercase__\t\t)\r\n\t\t\t# if all vertices haven't been visited select a new one to visit\r\n\t\t\tif len(lowercase__\t\t) != len(lowercase__\t\t):\r\n\t\t\t\t\t\tfor vertice in vertices:\r\n\t\t\t\t\t\t\t\t\tif vertice not in visited:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= topological_sort(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n # return sort\r\n\t\t\treturn sort\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= topological_sort('a', [], [])\r\n\t\t\t\tprint(sort)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nfrom math import logaa\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = \"base_exp.txt\"\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :float \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0\r\n\t\t\tfor i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__\t\t)\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 
list(map(lowercase__\t\t\t\t,\t\t\t\t\tline.split(\"\"\",\"\"\"\t\t)\t\t)\t\t)\r\n\t\t\t\t\t\tif x * logaa(lowercase__\t\t) > largest:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= x * logaa(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= i + 1\r\n\t\t\treturn result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(solution())\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":651,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1.6021e-19 # units = C\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\t) -> tuple[str, float]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif (conductivity, electron_conc, mobility).count(0\t\t) != 1:\r\n\t\t\t\t\t\traise ValueError(\"\"\"You cannot supply more or less than 2 values\"\"\"\t\t)\r\n\t\t\telif conductivity < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Conductivity cannot be negative\"\"\"\t\t)\r\n\t\t\telif electron_conc < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Electron concentration cannot be negative\"\"\"\t\t)\r\n\t\t\telif mobility < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"mobility cannot be negative\"\"\"\t\t)\r\n\t\t\telif conductivity == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"conductivity\",\r\n\t\t\t\t\t\t mobility * electron_conc * ELECTRON_CHARGE,\r\n\t\t\t\t\t\t)\r\n\t\t\telif electron_conc == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"electron_conc\",\r\n\t\t\t\t\t\t conductivity / (mobility * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"mobility\",\r\n\t\t\t\t\t\t conductivity / (electron_conc * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport itertools\r\nimport math\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif 1 < number < 4:\r\n\t\t\t\t\t\t# 2 and 3 are primes\r\n\t\t\t\t\t\treturn True\r\n\t\t\telif number < 2 or number % 2 == 0 or number % 3 == 0:\r\n\t\t\t\t\t\t# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t# All primes number are in format of 6k +/- 1\r\n\t\t\tfor i in range(5\t\t\t\t,\t\t\t\t\tint(math.sqrt(lowercase__\t\t) + 1\t\t)\t\t\t\t,\t\t\t\t\t6\t\t):\r\n\t\t\t\t\t\tif number % i == 0 or number % (i + 2) == 0:\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\treturn True\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Dict:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\twhile True:\r\n\t\t\t\t\t\tif is_prime(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tyield num\r\n\t\t\t\t\t\tnum += 1\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 1_0_0_0_1\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn 
next(itertools.islice(prime_generator()\t\t\t\t,\t\t\t\t\tnth - 1\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":652,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom dataclasses import dataclass, field\r\nfrom typing import Tuple\r\n\r\nfrom ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends\r\nfrom .benchmark_args_utils import BenchmarkArguments\r\n\r\n\r\nif is_torch_available():\r\n\t\t\t\timport torch\r\n\r\nif is_torch_tpu_available(check_device=False):\r\n\t\t\t\timport torch_xla.core.xla_model as xm\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\t[\r\n\t\t\t \"no_inference\",\r\n\t\t\t \"no_cuda\",\r\n\t\t\t \"no_tpu\",\r\n\t\t\t \"no_speed\",\r\n\t\t\t \"no_memory\",\r\n\t\t\t \"no_env_print\",\r\n\t\t\t \"no_multi_process\",\r\n\t\t\t]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, **__A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tfor deprecated_arg in self.deprecated_args:\r\n\t\t\t\t\t\t\t\t\tif deprecated_arg in kwargs:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= deprecated_arg[3:]\r\n\t\t\t\t\t\t\t\t\t\t\t\tsetattr(self\t, __A\t, not kwargs.pop(__A\t)\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.warning(\r\n\t\t\t\t\t\t\t\t\t\t\t\t f\"\"\"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t f\"\"\" {positive_arg}={kwargs[positive_arg]}\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= kwargs.pop(\"\"\"torchscript\"\"\"\t, self.torchscript\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= kwargs.pop(\"\"\"torch_xla_tpu_print_metrics\"\"\"\t, self.torch_xla_tpu_print_metrics\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= kwargs.pop(\"\"\"fp16_opt_level\"\"\"\t, self.fpaa_opt_level\t)\r\n\t\t\t\t\t\tsuper().__init__(**__A\t)\r\n\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(default=A__ ,\t\t\tmetadata={\"help\": \"Trace the models using torchscript\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :bool \t\t\t=\t\t\t\t\t\tfield(default=A__ ,\t\t\tmetadata={\"help\": \"Print Xla/PyTorch tpu metrics\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tfield(\r\n\t\t\t default=\"O1\" ,\t\t\tmetadata={\r\n\t\t\t \"help\": (\r\n\t\t\t \"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
\"\r\n\t\t\t \"See details at https://nvidia.github.io/apex/amp.html\"\r\n\t\t\t )\r\n\t\t\t } ,\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@cached_property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple[\"torch.device\", int]:\r\n\t\t\t\t\t\trequires_backends(self\t, [\"\"\"torch\"\"\"]\t)\r\n\t\t\t\t\t\tlogger.info(\"\"\"PyTorch: setting up devices\"\"\"\t)\r\n\t\t\t\t\t\tif not self.cuda:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= torch.device(\"\"\"cpu\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 0\r\n\t\t\t\t\t\telif is_torch_tpu_available():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= xm.xla_device()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 0\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.device(\"\"\"cuda\"\"\" if torch.cuda.is_available() else \"\"\"cpu\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.cuda.device_count()\r\n\t\t\t\t\t\treturn device, n_gpu\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn is_torch_tpu_available() and self.tpu\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\trequires_backends(self\t, [\"\"\"torch\"\"\"]\t)\r\n\t\t\t\t\t\t# TODO(PVP): currently only single GPU is supported\r\n\t\t\t\t\t\treturn torch.cuda.current_device()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\t\"torch.device\":\r\n\t\t\t\t\t\trequires_backends(self\t, [\"\"\"torch\"\"\"]\t)\r\n\t\t\t\t\t\treturn self._setup_devices[0]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\trequires_backends(self\t, [\"\"\"torch\"\"\"]\t)\r\n\t\t\t\t\t\treturn self._setup_devices[1]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn self.n_gpu > 0\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 5_0\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [1] * (length + 1)\r\n\r\n\t\t\tfor row_length in range(3\t\t\t\t,\t\t\t\t\tlength + 1\t\t):\r\n\t\t\t\t\t\tfor block_length in range(3\t\t\t\t,\t\t\t\t\trow_length + 1\t\t):\r\n\t\t\t\t\t\t\t\t\tfor block_start in range(row_length - block_length\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tways_number[row_length] += ways_number[\r\n\t\t\t\t\t\t\t\t\t\t\t\t row_length - block_start - block_length - 1\r\n\t\t\t\t\t\t\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t\t\t\tways_number[row_length] += 1\r\n\r\n\t\t\treturn ways_number[length]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":653,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom itertools import zip_longest\r\n\r\nimport requests\r\nfrom bsa import BeautifulSoup\r\nfrom pandas import DataFrame\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = 
\"laptop\"\t\t) -> DataFrame:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= f\"\"\"https://www.amazon.in/laptop/s?k={product}\"\"\"\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t \"\"\"User-Agent\"\"\": \"\"\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36\"\"\",\r\n\t\t\t \"\"\"Accept-Language\"\"\": \"\"\"en-US, en;q=0.5\"\"\",\r\n\t\t\t}\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= BeautifulSoup(requests.get(lowercase__\t\t\t\t,\t\t\t\t\theaders=lowercase__\t\t).text\t\t)\r\n\t\t\t# Initialize a Pandas dataframe with the column titles\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= DataFrame(\r\n\t\t\t columns=[\r\n\t\t\t \"\"\"Product Title\"\"\",\r\n\t\t\t \"\"\"Product Link\"\"\",\r\n\t\t\t \"\"\"Current Price of the product\"\"\",\r\n\t\t\t \"\"\"Product Rating\"\"\",\r\n\t\t\t \"\"\"MRP of the product\"\"\",\r\n\t\t\t \"\"\"Discount\"\"\",\r\n\t\t\t ]\t\t)\r\n\t\t\t# Loop through each entry and store them in the dataframe\r\n\t\t\tfor item, _ in zip_longest(\r\n\t\t\t soup.find_all(\r\n\t\t\t \"\"\"div\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"s-result-item\"\"\", \"\"\"data-component-type\"\"\": \"\"\"s-search-result\"\"\"}\t\t\t\t,\t\t\t\t\t)\t\t\t\t,\t\t\t\t\tsoup.find_all(\"\"\"div\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-row a-size-base a-color-base\"\"\"}\t\t)\t\t\t\t,\t\t\t\t\t):\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= item.ha.text\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"https://www.amazon.in/\"\"\" + item.ha.a[\"\"\"href\"\"\"]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-offscreen\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-icon-alt\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"Not available\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"โ‚น\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t + item.find(\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-price a-text-price\"\"\"}\t\t).text.split(\"\"\"โ‚น\"\"\"\t\t)[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= float(\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t float(product_mrp.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t - float(product_price.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t )\r\n\t\t\t\t\t\t\t\t\t\t\t\t / float(product_mrp.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t )\r\n\t\t\t\t\t\t\t\t\t\t\t\t * 1_0_0\t\t)\r\n\t\t\t\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= float(\"\"\"nan\"\"\"\t\t)\r\n\t\t\t\t\t\texcept 
AttributeError:\r\n\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\r\n\t\t\t\t\t\t product_title,\r\n\t\t\t\t\t\t product_link,\r\n\t\t\t\t\t\t product_price,\r\n\t\t\t\t\t\t product_rating,\r\n\t\t\t\t\t\t product_mrp,\r\n\t\t\t\t\t\t discount,\r\n\t\t\t\t\t\t]\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tdata_frame.index += 1\r\n\t\t\treturn data_frame\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 'headphones'\r\n\t\t\t\tget_amazon_product_data(product).to_csv(F\"\"\"Amazon Product Data for {product}.csv\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/\r\n\r\nimport gc\r\nimport random\r\nimport tempfile\r\nimport unittest\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom PIL import Image\r\nfrom transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer\r\n\r\nfrom diffusers import (\r\n AutoencoderKL,\r\n ControlNetModel,\r\n DDIMScheduler,\r\n StableDiffusionControlNetImgaImgPipeline,\r\n UNetaDConditionModel,\r\n)\r\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel\r\nfrom diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device\r\nfrom diffusers.utils.import_utils import is_xformers_available\r\nfrom diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu\r\n\r\nfrom ..pipeline_params import (\r\n IMAGE_TO_IMAGE_IMAGE_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_PARAMS,\r\n)\r\nfrom ..test_pipelines_common import (\r\n PipelineKarrasSchedulerTesterMixin,\r\n PipelineLatentTesterMixin,\r\n PipelineTesterMixin,\r\n)\r\n\r\n\r\nenable_full_determinism()\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS.union({\"control_image\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, 
)\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= floats_tensor(control_image.shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is 
only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfrozenset([]\t\t\t\t\t\t\t) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\r\n\t\t\t\t\t\tdef init_weights(__A\t):\r\n\t\t\t\t\t\t\t\t\tif isinstance(__A\t, torch.nn.Convad\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\ttorch.nn.init.normal(m.weight\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tm.bias.data.fill_(1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 
CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= MultiControlNetModel([controlneta, controlneta]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= floats_tensor(control_image[0].shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 1_0.0\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 4\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.1\t, 
control_guidance_end=0.2\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=[0.1, 0.3]\t, control_guidance_end=[0.2, 0.7]\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.4\t, control_guidance_end=[0.5, 0.8]\t)[0]\r\n\r\n\t\t\t\t\t\t# make sure that all outputs are different\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdir:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# save_pretrained is not implemented for Multi-ControlNet\r\n\t\t\t\t\t\t\t\t\t\t\t\tpipe.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\texcept NotImplementedError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n@slow\r\n@require_torch_gpu\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tsuper().tearDown()\r\n\t\t\t\t\t\tgc.collect()\r\n\t\t\t\t\t\ttorch.cuda.empty_cache()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= ControlNetModel.from_pretrained(\"\"\"lllyasviel/sd-controlnet-canny\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= StableDiffusionControlNetImgaImgPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, safety_checker=__A\t, controlnet=__A\t)\r\n\t\t\t\t\t\tpipe.enable_model_cpu_offload()\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.Generator(device=\"\"\"cpu\"\"\"\t).manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"evil space-punk bird\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t 
\"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png\"\"\"\t).resize((512, 512)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png\"\"\"\t).resize((512, 512)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(\r\n\t\t\t\t\t\t __A\t, __A\t, control_image=__A\t, generator=__A\t, output_type=\"\"\"np\"\"\"\t, num_inference_steps=50\t, strength=0.6\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= output.images[0]\r\n\r\n\t\t\t\t\t\tassert image.shape == (512, 512, 3)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= load_numpy(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy\"\"\"\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(expected_image - image\t).max() < 9E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":654,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport qiskit\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> qiskit.result.counts.Counts:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= qiskit.Aer.get_backend(\"\"\"aer_simulator\"\"\"\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= qiskit.QuantumCircuit(4\t\t\t\t,\t\t\t\t\t2\t\t)\r\n\t\t\t# encode inputs in qubits 0 and 1\r\n\t\t\tif bita == 1:\r\n\t\t\t\t\t\tqc_ha.x(0\t\t)\r\n\t\t\tif bita == 1:\r\n\t\t\t\t\t\tqc_ha.x(1\t\t)\r\n\t\t\tqc_ha.barrier()\r\n\r\n\t\t\t# use cnots to write XOR of the inputs on qubit2\r\n\t\t\tqc_ha.cx(0\t\t\t\t,\t\t\t\t\t2\t\t)\r\n\t\t\tqc_ha.cx(1\t\t\t\t,\t\t\t\t\t2\t\t)\r\n\r\n\t\t\t# use ccx / toffoli gate to write AND of the inputs on qubit3\r\n\t\t\tqc_ha.ccx(0\t\t\t\t,\t\t\t\t\t1\t\t\t\t,\t\t\t\t\t3\t\t)\r\n\t\t\tqc_ha.barrier()\r\n\r\n\t\t\t# extract outputs\r\n\t\t\tqc_ha.measure(2\t\t\t\t,\t\t\t\t\t0\t\t) # extract XOR value\r\n\t\t\tqc_ha.measure(3\t\t\t\t,\t\t\t\t\t1\t\t) # extract AND value\r\n\r\n\t\t\t# Execute the circuit on the qasm simulator\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= qiskit.execute(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tshots=1_0_0_0\t\t)\r\n\r\n\t\t\t# Return the histogram data of the results of the experiment\r\n\t\t\treturn job.result().get_counts(lowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= half_adder(1, 1)\r\n\t\t\t\tprint(F\"\"\"Half Adder Output Qubit Counts: {counts}\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import Optional\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom transformers import GPTaConfig, GPTaLMHeadModel\r\nfrom transformers.modeling_utils import ModuleUtilsMixin\r\n\r\nfrom ...configuration_utils import ConfigMixin, register_to_config\r\nfrom ...models import ModelMixin\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t[r\"h\\.\\d+\\.attn\\.bias\", 
r\"h\\.\\d+\\.attn\\.masked_bias\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@register_to_config\r\n\t\t\tdef __init__( self\t, __A\t, __A\t, __A = None\t, __A = 5_0257\t, __A = 1024\t, __A = 768\t, __A = 12\t, __A = 12\t, __A = None\t, __A = \"gelu_new\"\t, __A = 0.1\t, __A = 0.1\t, __A = 0.1\t, __A = 1E-5\t, __A = 0.0_2\t, __A = True\t, __A = True\t, __A = False\t, __A = False\t, ) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tsuper().__init__()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= prefix_length\r\n\r\n\t\t\t\t\t\tif prefix_inner_dim != n_embd and prefix_hidden_dim is None:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t f\"\"\"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and\"\"\"\r\n\t\t\t\t\t\t\t\t\t f\"\"\" `n_embd`: {n_embd} are not equal.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= prefix_inner_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= prefix_hidden_dim\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t nn.Linear(self.prefix_inner_dim\t, self.prefix_hidden_dim\t)\r\n\t\t\t\t\t\t if self.prefix_hidden_dim is not None\r\n\t\t\t\t\t\t else nn.Identity()\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= (\r\n\t\t\t\t\t\t nn.Linear(self.prefix_hidden_dim\t, __A\t) if self.prefix_hidden_dim is not None else nn.Identity()\r\n\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= GPTaConfig(\r\n\t\t\t\t\t\t vocab_size=__A\t, n_positions=__A\t, n_embd=__A\t, n_layer=__A\t, n_head=__A\t, n_inner=__A\t, activation_function=__A\t, resid_pdrop=__A\t, embd_pdrop=__A\t, attn_pdrop=__A\t, layer_norm_epsilon=__A\t, initializer_range=__A\t, scale_attn_weights=__A\t, use_cache=__A\t, scale_attn_by_inverse_layer_idx=__A\t, reorder_and_upcast_attn=__A\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= GPTaLMHeadModel(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A = None\t, __A = None\t, ) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.transformer.transformer.wte(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.encode_prefix(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.decode_prefix(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.cat((prefix_embeds, embedding_text)\t, dim=1\t)\r\n\r\n\t\t\t\t\t\tif labels is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_dummy_token(input_ids.shape[0]\t, input_ids.device\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.cat((dummy_token, input_ids)\t, dim=1\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.transformer(inputs_embeds=__A\t, labels=__A\t, attention_mask=__A\t)\r\n\t\t\t\t\t\tif self.prefix_hidden_dim is not None:\r\n\t\t\t\t\t\t\t\t\treturn out, hidden\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\treturn out\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\ttorch.Tensor:\r\n\t\t\t\t\t\treturn torch.zeros(__A\t, self.prefix_length\t, dtype=torch.intaa\t, device=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\treturn self.encode_prefix(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@torch.no_grad()\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.split(__A\t, 1\t, 
dim=0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= []\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor feature in features:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.decode_prefix(feature.to(__A\t)\t) # back to the clip feature\r\n\t\t\t\t\t\t\t\t\t# Only support beam search for now\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.generate_beam(\r\n\t\t\t\t\t\t\t\t\t input_embeds=__A\t, device=__A\t, eos_token_id=__A\t)\r\n\t\t\t\t\t\t\t\t\tgenerated_tokens.append(output_tokens[0]\t)\r\n\t\t\t\t\t\t\t\t\tgenerated_seq_lengths.append(seq_lengths[0]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.stack(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= torch.stack(__A\t)\r\n\t\t\t\t\t\treturn generated_tokens, generated_seq_lengths\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@torch.no_grad()\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t, __A=None\t, __A=None\t, __A = 5\t, __A = 67\t, __A = 1.0\t, __A = None\t, ) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= eos_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= torch.ones(__A\t, device=__A\t, dtype=torch.int\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.zeros(__A\t, device=__A\t, dtype=torch.bool\t)\r\n\r\n\t\t\t\t\t\tif input_embeds is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= input_embeds\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.transformer.transformer.wte(__A\t)\r\n\r\n\t\t\t\t\t\tfor i in range(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.transformer(inputs_embeds=__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= outputs.logits\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= logits[:, -1, :] / (temperature if temperature > 0 else 1.0)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= logits.softmax(-1\t).log()\r\n\r\n\t\t\t\t\t\t\t\t\tif scores is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= logits.topk(__A\t, -1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= generated.expand(__A\t, *generated.shape[1:]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[str] \t\t\t\t\t= next_tokens.permute(1\t, 0\t), scores.squeeze(0\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tif tokens is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= next_tokens\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokens.expand(__A\t, *tokens.shape[1:]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= torch.cat((tokens, next_tokens)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= -float(np.inf\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= scores[:, None] + logits\r\n\t\t\t\t\t\t\t\t\t\t\t\tseq_lengths[~is_stopped] += 1\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= scores_sum / seq_lengths[:, None]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scores_sum_average.view(-1\t).topk(__A\t, -1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ 
:Optional[Any] \t\t\t\t\t= next_tokens // scores_sum.shape[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= seq_lengths[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= next_tokens % scores_sum.shape[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= next_tokens.unsqueeze(1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokens[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.cat((tokens, next_tokens)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= generated[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= scores_sum_average * seq_lengths\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= is_stopped[next_tokens_source]\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.transformer.transformer.wte(next_tokens.squeeze()\t).view(generated.shape[0]\t, 1\t, -1\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.cat((generated, next_token_embed)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= is_stopped + next_tokens.eq(__A\t).squeeze()\r\n\t\t\t\t\t\t\t\t\tif is_stopped.all():\r\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scores / seq_lengths\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= scores.argsort(descending=__A\t)\r\n\t\t\t\t\t\t# tokens tensors are already padded to max_seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= [tokens[i] for i in order]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= torch.stack(__A\t, dim=0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.tensor([seq_lengths[i] for i in order]\t, dtype=seq_lengths.dtype\t)\r\n\t\t\t\t\t\treturn output_texts, seq_lengths\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":655,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nfrom shutil import copyfile\r\nfrom typing import List, Optional, Tuple\r\n\r\nfrom tokenizers import processors\r\n\r\nfrom ...tokenization_utils import AddedToken, BatchEncoding\r\nfrom ...tokenization_utils_fast import PreTrainedTokenizerFast\r\nfrom ...utils import is_sentencepiece_available, logging\r\n\r\n\r\nif is_sentencepiece_available():\r\n\t\t\t\tfrom .tokenization_nllb import NllbTokenizer\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= None\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'vocab_file': {\r\n 'facebook/nllb-200-distilled-600M': (\r\n 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'\r\n ),\r\n },\r\n 'tokenizer_file': {\r\n 'facebook/nllb-200-distilled-600M': (\r\n 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'\r\n ),\r\n },\r\n}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'facebook/nllb-large-en-ro': 10_24,\r\n 'facebook/nllb-200-distilled-600M': 10_24,\r\n}\r\n\r\n# fmt: off\r\n__UpperCAmelCase\t\t\t\t\t\t\t= ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 
'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\tVOCAB_FILES_NAMES\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tPRETRAINED_VOCAB_FILES_MAP\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t[\"input_ids\", \"attention_mask\"]\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tNllbTokenizer\r\n\r\n\t\t\tUpperCAmelCase_ :List[int] \t\t\t=\t\t\t\t\t\t[]\r\n\t\t\tUpperCAmelCase_ :List[int] \t\t\t=\t\t\t\t\t\t[]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A=None\t, __A=None\t, __A=\"\"\t, __A=\"\"\t, __A=\"\"\t, __A=\"\"\t, __A=\"\"\t, __A=\"\"\t, __A=\"\"\t, __A=None\t, __A=None\t, __A=None\t, __A=False\t, **__A\t, ) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\t# Mask token behave like a normal word, i.e. 
include the space before it\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= AddedToken(__A\t, lstrip=__A\t, rstrip=__A\t) if isinstance(__A\t, __A\t) else mask_token\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= legacy_behaviour\r\n\t\t\t\t\t\tsuper().__init__(\r\n\t\t\t\t\t\t vocab_file=__A\t, tokenizer_file=__A\t, bos_token=__A\t, eos_token=__A\t, sep_token=__A\t, cls_token=__A\t, unk_token=__A\t, pad_token=__A\t, mask_token=__A\t, src_lang=__A\t, tgt_lang=__A\t, additional_special_tokens=__A\t, legacy_behaviour=__A\t, **__A\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= vocab_file\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= False if not self.vocab_file else True\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= FAIRSEQ_LANGUAGE_CODES.copy()\r\n\r\n\t\t\t\t\t\tif additional_special_tokens is not None:\r\n\t\t\t\t\t\t\t\t\t# Only add those special tokens if they are not already there.\r\n\t\t\t\t\t\t\t\t\t_additional_special_tokens.extend(\r\n\t\t\t\t\t\t\t\t\t [t for t in additional_special_tokens if t not in _additional_special_tokens]\t)\r\n\r\n\t\t\t\t\t\tself.add_special_tokens({\"\"\"additional_special_tokens\"\"\": _additional_special_tokens}\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t\t\t\t lang_code: self.convert_tokens_to_ids(__A\t) for lang_code in FAIRSEQ_LANGUAGE_CODES\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= src_lang if src_lang is not None else \"\"\"eng_Latn\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.convert_tokens_to_ids(self._src_lang\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tgt_lang\r\n\t\t\t\t\t\tself.set_src_lang_special_tokens(self._src_lang\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\treturn self._src_lang\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@src_lang.setter\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= new_src_lang\r\n\t\t\t\t\t\tself.set_src_lang_special_tokens(self._src_lang\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = None\t) ->\t\t\t\t\tList[int]:\r\n\t\t\t\t\t\tif token_ids_a is None:\r\n\t\t\t\t\t\t\t\t\treturn self.prefix_tokens + token_ids_a + self.suffix_tokens\r\n\t\t\t\t\t\t# We don't expect to process pairs, but leave the pair logic for API consistency\r\n\t\t\t\t\t\treturn self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = None\t) ->\t\t\t\t\tList[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= [self.sep_token_id]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [self.cls_token_id]\r\n\r\n\t\t\t\t\t\tif token_ids_a is None:\r\n\t\t\t\t\t\t\t\t\treturn len(cls + token_ids_a + sep\t) * [0]\r\n\t\t\t\t\t\treturn len(cls + token_ids_a + sep + sep + token_ids_a + sep\t) * [0]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, **__A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tif src_lang is None or tgt_lang is None:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"Translation requires a `src_lang` and a `tgt_lang` for this model\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= src_lang\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self(__A\t, add_special_tokens=__A\t, return_tensors=__A\t, **__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 
self.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tgt_lang_id\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = \"eng_Latn\"\t, __A = None\t, __A = \"fra_Latn\"\t, **__A\t, ) ->\t\t\t\t\tBatchEncoding:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= src_lang\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tgt_lang\r\n\t\t\t\t\t\treturn super().prepare_seqaseq_batch(__A\t, __A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn self.set_src_lang_special_tokens(self.src_lang\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn self.set_tgt_lang_special_tokens(self.tgt_lang\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.convert_tokens_to_ids(__A\t)\r\n\r\n\t\t\t\t\t\tif self.legacy_behaviour:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= []\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [self.eos_token_id, self.cur_lang_code]\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [self.cur_lang_code]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [self.eos_token_id]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.convert_ids_to_tokens(self.prefix_tokens\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.convert_ids_to_tokens(self.suffix_tokens\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= processors.TemplateProcessing(\r\n\t\t\t\t\t\t single=prefix_tokens_str + [\"\"\"$A\"\"\"] + suffix_tokens_str\t, pair=prefix_tokens_str + [\"\"\"$A\"\"\", \"\"\"$B\"\"\"] + suffix_tokens_str\t, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str\t, self.prefix_tokens + self.suffix_tokens\t)\t)\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tif self.legacy_behaviour:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= []\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= [self.eos_token_id, self.cur_lang_code]\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= [self.cur_lang_code]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= [self.eos_token_id]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.convert_ids_to_tokens(self.prefix_tokens\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.convert_ids_to_tokens(self.suffix_tokens\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= processors.TemplateProcessing(\r\n\t\t\t\t\t\t single=prefix_tokens_str + [\"\"\"$A\"\"\"] + suffix_tokens_str\t, pair=prefix_tokens_str + [\"\"\"$A\"\"\", \"\"\"$B\"\"\"] + suffix_tokens_str\t, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str\t, self.prefix_tokens + self.suffix_tokens\t)\t)\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = None\t) ->\t\t\t\t\tTuple[str]:\r\n\t\t\t\t\t\tif not self.can_save_slow_tokenizer:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t \"\"\"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow \"\"\"\r\n\t\t\t\t\t\t\t\t\t \"\"\"tokenizer.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tif not 
os.path.isdir(__A\t):\r\n\t\t\t\t\t\t\t\t\tlogger.error(f\"\"\"Vocabulary path ({save_directory}) should be a directory.\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\treturn\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= os.path.join(\r\n\t\t\t\t\t\t __A\t, (filename_prefix + \"\"\"-\"\"\" if filename_prefix else \"\"\"\"\"\") + VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\r\n\t\t\t\t\t\tif os.path.abspath(self.vocab_file\t) != os.path.abspath(__A\t):\r\n\t\t\t\t\t\t\t\t\tcopyfile(self.vocab_file\t, __A\t)\r\n\r\n\t\t\t\t\t\treturn (out_vocab_file,)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport copy\r\nfrom collections import OrderedDict\r\nfrom typing import Dict, Mapping\r\n\r\nfrom packaging import version\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...onnx import OnnxConfig\r\nfrom ...utils import logging\r\nfrom ..auto import CONFIG_MAPPING\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',\r\n # See all DETR models at https://huggingface.co/models?filter=detr\r\n}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t\"detr\"\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t[\"past_key_values\"]\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t{\r\n\t\t\t \"hidden_size\": \"d_model\",\r\n\t\t\t \"num_attention_heads\": \"encoder_attention_heads\",\r\n\t\t\t}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A=True\t, __A=None\t, __A=3\t, __A=100\t, __A=6\t, __A=2048\t, __A=8\t, __A=6\t, __A=2048\t, __A=8\t, __A=0.0\t, __A=0.0\t, __A=True\t, __A=\"relu\"\t, __A=256\t, __A=0.1\t, __A=0.0\t, __A=0.0\t, __A=0.0_2\t, __A=1.0\t, __A=False\t, __A=\"sine\"\t, __A=\"resnet50\"\t, __A=True\t, __A=False\t, __A=1\t, __A=5\t, __A=2\t, __A=1\t, __A=1\t, __A=5\t, __A=2\t, __A=0.1\t, **__A\t, ) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tif backbone_config is not None and use_timm_backbone:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"You can't specify both `backbone_config` and `use_timm_backbone`.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tif not use_timm_backbone:\r\n\t\t\t\t\t\t\t\t\tif backbone_config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CONFIG_MAPPING[\"\"\"resnet\"\"\"](out_features=[\"\"\"stage4\"\"\"]\t)\r\n\t\t\t\t\t\t\t\t\telif isinstance(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= backbone_config.get(\"\"\"model_type\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CONFIG_MAPPING[backbone_model_type]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= config_class.from_dict(__A\t)\r\n\t\t\t\t\t\t\t\t\t# set timm attributes to None\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= None, None, None\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= use_timm_backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= backbone_config\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= num_channels\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= num_queries\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= d_model\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= encoder_ffn_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= encoder_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= decoder_ffn_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= decoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= decoder_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= attention_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= activation_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= activation_function\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= init_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= init_xavier_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= encoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= decoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= auxiliary_loss\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= position_embedding_type\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= use_pretrained_backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= dilation\r\n\t\t\t\t\t\t# Hungarian matcher\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= class_cost\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= bbox_cost\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= giou_cost\r\n\t\t\t\t\t\t# Loss coefficients\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= mask_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= dice_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= bbox_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= giou_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= eos_coefficient\r\n\t\t\t\t\t\tsuper().__init__(is_encoder_decoder=__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self.encoder_attention_heads\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self.d_model\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@classmethod\r\n\t\t\tdef 
\t\t\t__lowerCAmelCase\t\t\t( cls\t, __A\t, **__A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn cls(backbone_config=__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict[str, any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= copy.deepcopy(self.__dict__\t)\r\n\t\t\t\t\t\tif output[\"backbone_config\"] is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.backbone_config.to_dict()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.__class__.model_type\r\n\t\t\t\t\t\treturn output\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\tversion.parse(\"1.11\"\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tMapping[str, Mapping[int, str]]:\r\n\t\t\t\t\t\treturn OrderedDict(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t (\"\"\"pixel_values\"\"\", {0: \"\"\"batch\"\"\", 1: \"\"\"num_channels\"\"\", 2: \"\"\"height\"\"\", 3: \"\"\"width\"\"\"}),\r\n\t\t\t\t\t\t (\"\"\"pixel_mask\"\"\", {0: \"\"\"batch\"\"\"}),\r\n\t\t\t\t\t\t ]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tfloat:\r\n\t\t\t\t\t\treturn 1E-5\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn 12\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":656,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels\r\nfrom .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features\r\nfrom .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor\r\nfrom .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import (\r\n OptionalDependencyNotAvailable,\r\n _LazyModule,\r\n is_tf_available,\r\n is_torch_available,\r\n is_vision_available,\r\n)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}\r\n\r\ntry:\r\n\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['DeiTFeatureExtractor']\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['DeiTImageProcessor']\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'DeiTForImageClassification',\r\n\t\t\t\t 'DeiTForImageClassificationWithTeacher',\r\n\t\t\t\t 'DeiTForMaskedImageModeling',\r\n\t\t\t\t 'DeiTModel',\r\n\t\t\t\t 'DeiTPreTrainedModel',\r\n\t\t\t\t]\r\n\r\ntry:\r\n\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept 
OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'TFDeiTForImageClassification',\r\n\t\t\t\t 'TFDeiTForImageClassificationWithTeacher',\r\n\t\t\t\t 'TFDeiTForMaskedImageModeling',\r\n\t\t\t\t 'TFDeiTModel',\r\n\t\t\t\t 'TFDeiTPreTrainedModel',\r\n\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .feature_extraction_deit import DeiTFeatureExtractor\r\n\t\t\t\t\t\t\t\tfrom .image_processing_deit import DeiTImageProcessor\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_deit import (\r\n\t\t\t\t\t\t\t\t DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t DeiTForImageClassification,\r\n\t\t\t\t\t\t\t\t DeiTForImageClassificationWithTeacher,\r\n\t\t\t\t\t\t\t\t DeiTForMaskedImageModeling,\r\n\t\t\t\t\t\t\t\t DeiTModel,\r\n\t\t\t\t\t\t\t\t DeiTPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_tf_deit import (\r\n\t\t\t\t\t\t\t\t TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t TFDeiTForImageClassification,\r\n\t\t\t\t\t\t\t\t TFDeiTForImageClassificationWithTeacher,\r\n\t\t\t\t\t\t\t\t TFDeiTForMaskedImageModeling,\r\n\t\t\t\t\t\t\t\t TFDeiTModel,\r\n\t\t\t\t\t\t\t\t TFDeiTPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":657,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'configuration_squeezebert': [\r\n 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',\r\n 'SqueezeBertConfig',\r\n 'SqueezeBertOnnxConfig',\r\n ],\r\n 'tokenization_squeezebert': ['SqueezeBertTokenizer'],\r\n}\r\n\r\ntry:\r\n\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['SqueezeBertTokenizerFast']\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'SqueezeBertForMaskedLM',\r\n\t\t\t\t 'SqueezeBertForMultipleChoice',\r\n\t\t\t\t 
'SqueezeBertForQuestionAnswering',\r\n\t\t\t\t 'SqueezeBertForSequenceClassification',\r\n\t\t\t\t 'SqueezeBertForTokenClassification',\r\n\t\t\t\t 'SqueezeBertModel',\r\n\t\t\t\t 'SqueezeBertModule',\r\n\t\t\t\t 'SqueezeBertPreTrainedModel',\r\n\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom .configuration_squeezebert import (\r\n\t\t\t\t SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\r\n\t\t\t\t SqueezeBertConfig,\r\n\t\t\t\t SqueezeBertOnnxConfig,\r\n\t\t\t\t)\r\n\t\t\t\tfrom .tokenization_squeezebert import SqueezeBertTokenizer\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .tokenization_squeezebert_fast import SqueezeBertTokenizerFast\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_squeezebert import (\r\n\t\t\t\t\t\t\t\t SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t SqueezeBertForMaskedLM,\r\n\t\t\t\t\t\t\t\t SqueezeBertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t SqueezeBertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t SqueezeBertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t SqueezeBertForTokenClassification,\r\n\t\t\t\t\t\t\t\t SqueezeBertModel,\r\n\t\t\t\t\t\t\t\t SqueezeBertModule,\r\n\t\t\t\t\t\t\t\t SqueezeBertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'configuration_squeezebert': [\r\n 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',\r\n 'SqueezeBertConfig',\r\n 'SqueezeBertOnnxConfig',\r\n ],\r\n 'tokenization_squeezebert': ['SqueezeBertTokenizer'],\r\n}\r\n\r\ntry:\r\n\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['SqueezeBertTokenizerFast']\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'SqueezeBertForMaskedLM',\r\n\t\t\t\t 'SqueezeBertForMultipleChoice',\r\n\t\t\t\t 'SqueezeBertForQuestionAnswering',\r\n\t\t\t\t 'SqueezeBertForSequenceClassification',\r\n\t\t\t\t 'SqueezeBertForTokenClassification',\r\n\t\t\t\t 'SqueezeBertModel',\r\n\t\t\t\t 'SqueezeBertModule',\r\n\t\t\t\t 'SqueezeBertPreTrainedModel',\r\n\t\t\t\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom .configuration_squeezebert import (\r\n\t\t\t\t SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,\r\n\t\t\t\t SqueezeBertConfig,\r\n\t\t\t\t SqueezeBertOnnxConfig,\r\n\t\t\t\t)\r\n\t\t\t\tfrom .tokenization_squeezebert import 
SqueezeBertTokenizer\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .tokenization_squeezebert_fast import SqueezeBertTokenizerFast\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_squeezebert import (\r\n\t\t\t\t\t\t\t\t SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t SqueezeBertForMaskedLM,\r\n\t\t\t\t\t\t\t\t SqueezeBertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t SqueezeBertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t SqueezeBertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t SqueezeBertForTokenClassification,\r\n\t\t\t\t\t\t\t\t SqueezeBertModel,\r\n\t\t\t\t\t\t\t\t SqueezeBertModule,\r\n\t\t\t\t\t\t\t\t SqueezeBertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":658,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport copy\r\nimport tempfile\r\nimport unittest\r\n\r\nfrom transformers import MaMaaaConfig, is_torch_available\r\nfrom transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device\r\nfrom transformers.utils import cached_property\r\n\r\nfrom ...generation.test_utils import GenerationTesterMixin\r\nfrom ...test_configuration_common import ConfigTester\r\nfrom ...test_modeling_common import ModelTesterMixin, ids_tensor\r\nfrom ...test_pipeline_mixin import PipelineTesterMixin\r\n\r\n\r\nif is_torch_available():\r\n\t\t\t\timport torch\r\n\r\n\t\t\t\tfrom transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer\r\n\t\t\t\tfrom transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]=None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str=None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]=None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict=None\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]=None\t\t\t\t,\t\t\t\t\t) -> List[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif attention_mask is None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= input_ids.ne(config.pad_token_id\t\t)\r\n\t\t\tif decoder_attention_mask is None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= decoder_input_ids.ne(config.pad_token_id\t\t)\r\n\t\t\tif head_mask is None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= torch.ones(config.encoder_layers\t\t\t\t,\t\t\t\t\tconfig.encoder_attention_heads\t\t\t\t,\t\t\t\t\tdevice=lowercase__\t\t)\r\n\t\t\tif decoder_head_mask is None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.ones(config.decoder_layers\t\t\t\t,\t\t\t\t\tconfig.decoder_attention_heads\t\t\t\t,\t\t\t\t\tdevice=lowercase__\t\t)\r\n\t\t\tif cross_attn_head_mask is 
None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= torch.ones(config.decoder_layers\t\t\t\t,\t\t\t\t\tconfig.decoder_attention_heads\t\t\t\t,\t\t\t\t\tdevice=lowercase__\t\t)\r\n\t\t\treturn {\r\n\t\t\t \"input_ids\": input_ids,\r\n\t\t\t \"decoder_input_ids\": decoder_input_ids,\r\n\t\t\t \"attention_mask\": attention_mask,\r\n\t\t\t \"decoder_attention_mask\": attention_mask,\r\n\t\t\t \"head_mask\": head_mask,\r\n\t\t\t \"decoder_head_mask\": decoder_head_mask,\r\n\t\t\t \"cross_attn_head_mask\": cross_attn_head_mask,\r\n\t\t\t}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=13\t, __A=7\t, __A=True\t, __A=False\t, __A=99\t, __A=16\t, __A=2\t, __A=4\t, __A=4\t, __A=\"relu\"\t, __A=0.1\t, __A=0.1\t, __A=0.0\t, __A=0.0\t, __A=20\t, __A=2\t, __A=1\t, __A=0\t, ) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= parent\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= batch_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= is_training\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= use_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= vocab_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= hidden_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= num_hidden_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= num_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= intermediate_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= hidden_act\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= hidden_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= attention_probs_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= encoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= decoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= max_position_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= eos_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= pad_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= bos_token_id\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.vocab_size\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.eos_token_id # Eos Token\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.vocab_size\t)\r\n\r\n\t\t\t\t\t\t# we need to clamp the input ids here to avoid having pad token in between\r\n\t\t\t\t\t\t# this is because for M2M100 the position_ids are prepared such that\r\n\t\t\t\t\t\t# all pad tokens have pos id = 2 and rest are between 2..seq_length\r\n\t\t\t\t\t\t# and the seq_length here is seq_length - num_pad_tokens\r\n\t\t\t\t\t\t# but when using past, there is no way of knowing if the past input ids had\r\n\t\t\t\t\t\t# pad tokens in them, which results in incorrect seq_lenth and which in turn results in\r\n\t\t\t\t\t\t# position_ids being off by num_pad_tokens in past input\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= input_ids.clamp(self.pad_token_id + 1\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= decoder_input_ids.clamp(self.pad_token_id + 1\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_config()\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 
prepare_mam_aaa_inputs_dict(__A\t, __A\t, __A\t)\r\n\t\t\t\t\t\treturn config, inputs_dict\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn MaMaaaConfig(\r\n\t\t\t\t\t\t vocab_size=self.vocab_size\t, d_model=self.hidden_size\t, encoder_layers=self.num_hidden_layers\t, decoder_layers=self.num_hidden_layers\t, encoder_attention_heads=self.num_attention_heads\t, decoder_attention_heads=self.num_attention_heads\t, encoder_ffn_dim=self.intermediate_size\t, decoder_ffn_dim=self.intermediate_size\t, dropout=self.hidden_dropout_prob\t, attention_dropout=self.attention_probs_dropout_prob\t, encoder_layerdrop=self.encoder_layerdrop\t, decoder_layerdrop=self.decoder_layerdrop\t, max_position_embeddings=self.max_position_embeddings\t, eos_token_id=self.eos_token_id\t, bos_token_id=self.bos_token_id\t, pad_token_id=self.pad_token_id\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.prepare_config_and_inputs()\r\n\t\t\t\t\t\treturn config, inputs_dict\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= MaMaaaModel(config=__A\t).get_decoder().to(__A\t).eval()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= inputs_dict[\"\"\"input_ids\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= inputs_dict[\"\"\"attention_mask\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= inputs_dict[\"\"\"head_mask\"\"\"]\r\n\r\n\t\t\t\t\t\t# first forward pass\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= model(__A\t, attention_mask=__A\t, head_mask=__A\t, use_cache=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= outputs.to_tuple()\r\n\r\n\t\t\t\t\t\t# create hypothetical multiple next token and extent to next_input_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= ids_tensor((self.batch_size, 3)\t, config.vocab_size\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ids_tensor((self.batch_size, 3)\t, 2\t)\r\n\r\n\t\t\t\t\t\t# append to next input_ids and\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.cat([input_ids, next_tokens]\t, dim=-1\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= torch.cat([attention_mask, next_attn_mask]\t, dim=-1\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model(__A\t, attention_mask=__A\t)[\"\"\"last_hidden_state\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model(__A\t, attention_mask=__A\t, past_key_values=__A\t)[\r\n\t\t\t\t\t\t \"\"\"last_hidden_state\"\"\"\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t# select random slice\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ids_tensor((1,)\t, output_from_past.shape[-1]\t).item()\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= output_from_no_past[:, -3:, random_slice_idx].detach()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= output_from_past[:, :, random_slice_idx].detach()\r\n\r\n\t\t\t\t\t\tself.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]\t)\r\n\r\n\t\t\t\t\t\t# test that outputs are equal for slice\r\n\t\t\t\t\t\tself.parent.assertTrue(torch.allclose(__A\t, __A\t, atol=1E-2\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 
MaMaaaModel(config=__A\t).to(__A\t).eval()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model(**__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= outputs.encoder_last_hidden_state\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= outputs.last_hidden_state\r\n\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdirname:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model.get_encoder()\r\n\t\t\t\t\t\t\t\t\tencoder.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= MaMaaaEncoder.from_pretrained(__A\t).to(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= encoder(inputs_dict[\"\"\"input_ids\"\"\"]\t, attention_mask=inputs_dict[\"\"\"attention_mask\"\"\"]\t)[\r\n\t\t\t\t\t\t 0\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\tself.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3\t)\r\n\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdirname:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= model.get_decoder()\r\n\t\t\t\t\t\t\t\t\tdecoder.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= MaMaaaDecoder.from_pretrained(__A\t).to(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= decoder(\r\n\t\t\t\t\t\t input_ids=inputs_dict[\"\"\"decoder_input_ids\"\"\"]\t, attention_mask=inputs_dict[\"\"\"decoder_attention_mask\"\"\"]\t, encoder_hidden_states=__A\t, encoder_attention_mask=inputs_dict[\"\"\"attention_mask\"\"\"]\t, )[0]\r\n\r\n\t\t\t\t\t\tself.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3\t)\r\n\r\n\r\n\r\n\r\n@require_torch\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t (\r\n\t\t\t MaMaaaModel,\r\n\t\t\t MaMaaaForConditionalGeneration,\r\n\t\t\t )\r\n\t\t\t if is_torch_available()\r\n\t\t\t else ()\r\n\t\t\t)\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t(MaMaaaForConditionalGeneration,) if is_torch_available() else ()\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t {\r\n\t\t\t \"conversational\": MaMaaaForConditionalGeneration,\r\n\t\t\t \"feature-extraction\": MaMaaaModel,\r\n\t\t\t \"summarization\": MaMaaaForConditionalGeneration,\r\n\t\t\t \"text2text-generation\": MaMaaaForConditionalGeneration,\r\n\t\t\t \"translation\": MaMaaaForConditionalGeneration,\r\n\t\t\t }\r\n\t\t\t if is_torch_available()\r\n\t\t\t else {}\r\n\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tTrue\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tTrue\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tFalse\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tif pipeline_test_casse_name == \"TranslationPipelineTests\":\r\n\t\t\t\t\t\t\t\t\t# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.\r\n\t\t\t\t\t\t\t\t\t# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.\r\n\t\t\t\t\t\t\t\t\treturn True\r\n\r\n\t\t\t\t\t\treturn False\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= MaMaaaModelTester(self\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ConfigTester(self\t, 
config_class=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tself.config_tester.run_common_tests()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :str \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tfor model_class in self.all_model_classes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= model_class(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdirname:\r\n\t\t\t\t\t\t\t\t\t\t\t\tmodel.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= model_class.from_pretrained(__A\t, output_loading_info=__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(info[\"\"\"missing_keys\"\"\"]\t, []\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_decoder_model_past_large_inputs(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.model_tester.prepare_config_and_inputs_for_common()\r\n\t\t\t\t\t\tself.model_tester.check_encoder_decoder_model_standalone(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs_for_common()\r\n\r\n\t\t\t\t\t\tfor model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model_class(__A\t)\r\n\t\t\t\t\t\t\t\t\tmodel.to(__A\t)\r\n\t\t\t\t\t\t\t\t\tmodel.eval()\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= copy.deepcopy(self._prepare_for_class(__A\t, __A\t)\t)\r\n\r\n\t\t\t\t\t\t\t\t\tif not self.is_encoder_decoder:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= inputs[\"\"\"input_ids\"\"\"]\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel inputs[\"input_ids\"]\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= inputs[\"\"\"input_ids\"\"\"]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= inputs.get(\"\"\"decoder_input_ids\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel inputs[\"input_ids\"]\r\n\t\t\t\t\t\t\t\t\t\t\t\tinputs.pop(\"\"\"decoder_input_ids\"\"\"\t, __A\t)\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model.get_input_embeddings()\r\n\t\t\t\t\t\t\t\t\tif not self.is_encoder_decoder:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= wte(__A\t)\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= wte(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= wte(__A\t)\r\n\r\n\t\t\t\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\t\t\t\tmodel(**__A\t)[0]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= input_dict[\"\"\"input_ids\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= input_ids.ne(1\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 
MaMaaaForConditionalGeneration(__A\t).eval().to(__A\t)\r\n\t\t\t\t\t\tif torch_device == \"cuda\":\r\n\t\t\t\t\t\t\t\t\tmodel.half()\r\n\t\t\t\t\t\tmodel.generate(__A\t, attention_mask=__A\t)\r\n\t\t\t\t\t\tmodel.generate(num_beams=4\t, do_sample=__A\t, early_stopping=__A\t, num_return_sequences=3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t) -> List[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn torch.tensor(lowercase__\t\t\t\t,\t\t\t\t\tdtype=torch.long\t\t\t\t,\t\t\t\t\tdevice=lowercase__\t\t)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1e-4\r\n\r\n\r\n\r\n\r\n@require_torch\r\n@require_sentencepiece\r\n@require_tokenizers\r\n@slow\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@cached_property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn MaMaaaTokenizer.from_pretrained(\"\"\"facebook/m2m100_418M\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= MaMaaaModel.from_pretrained(\"\"\"facebook/m2m100_418M\"\"\"\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= prepare_mam_aaa_inputs_dict(model.config\t, __A\t, __A\t)\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= model(**__A\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= torch.Size((1, 11, 1024)\t)\r\n\t\t\t\t\t\tself.assertEqual(output.shape\t, __A\t)\r\n\t\t\t\t\t\t# change to expected output here\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.tensor(\r\n\t\t\t\t\t\t [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]]\t, device=__A\t)\r\n\t\t\t\t\t\tself.assertTrue(torch.allclose(output[:, :3, :3]\t, __A\t, atol=__A\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= MaMaaaForConditionalGeneration.from_pretrained(\"\"\"facebook/m2m100_418M\"\"\"\t).to(__A\t)\r\n\r\n\t\t\t\t\t\t# change to intended input\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= prepare_mam_aaa_inputs_dict(model.config\t, __A\t, __A\t)\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model(**__A\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= torch.Size((1, 11, model.config.vocab_size)\t)\r\n\t\t\t\t\t\tself.assertEqual(output.shape\t, __A\t)\r\n\t\t\t\t\t\t# change to expected output here\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= torch.tensor(\r\n\t\t\t\t\t\t [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]]\t, device=__A\t)\r\n\t\t\t\t\t\tself.assertTrue(torch.allclose(output[:, :3, :3]\t, __A\t, atol=__A\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef 
\t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= MaMaaaForConditionalGeneration.from_pretrained(\"\"\"facebook/m2m100_418M\"\"\"\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= MaMaaaTokenizer.from_pretrained(\"\"\"facebook/m2m100_418M\"\"\"\t, src_lang=\"\"\"fr\"\"\"\t, tgt_lang=\"\"\"en\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= [\r\n\t\t\t\t\t\t \"\"\"L'affaire NSA souligne l'absence totale de débat sur le renseignement\"\"\",\r\n\t\t\t\t\t\t \"\"\"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.\"\"\",\r\n\t\t\t\t\t\t \"\"\"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent\"\"\"\r\n\t\t\t\t\t\t \"\"\" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de\"\"\"\r\n\t\t\t\t\t\t \"\"\" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.\"\"\",\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t# The below article tests that we don't add any hypotheses outside of the top n_beams\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(__A\t, padding=__A\t, return_tensors=\"\"\"pt\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model.generate(\r\n\t\t\t\t\t\t input_ids=dct[\"\"\"input_ids\"\"\"].to(__A\t)\t, attention_mask=dct[\"\"\"attention_mask\"\"\"].to(__A\t)\t, num_beams=5\t, forced_bos_token_id=tokenizer.get_lang_id(\"\"\"en\"\"\"\t)\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [\r\n\t\t\t\t\t\t \"\"\"The NSA case highlights the total absence of intelligence debate\"\"\",\r\n\t\t\t\t\t\t \"\"\"I think there are two levels of response from the French government.\"\"\",\r\n\t\t\t\t\t\t \"\"\"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.\"\"\"\r\n\t\t\t\t\t\t \"\"\" Ambassador, they respond to a real discovery, which is that of the scale of U.S. 
surveillance on all\"\"\"\r\n\t\t\t\t\t\t \"\"\" communications in France.\"\"\",\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.batch_decode(\r\n\t\t\t\t\t\t hypotheses_batch.tolist()\t, clean_up_tokenization_spaces=__A\t, skip_special_tokens=__A\t)\r\n\t\t\t\t\t\tassert generated == expected_en\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 2_56\r\n# Modulus to hash a string\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1_00_00_03\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= len(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= len(lowercase__\t\t)\r\n\t\t\tif p_len > t_len:\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 1\r\n\r\n\t\t\t# Calculating the hash of pattern and substring of text\r\n\t\t\tfor i in range(lowercase__\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= (ord(pattern[i]\t\t) + p_hash * alphabet_size) % modulus\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (ord(text[i]\t\t) + text_hash * alphabet_size) % modulus\r\n\t\t\t\t\t\tif i == p_len - 1:\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= (modulus_power * alphabet_size) % modulus\r\n\r\n\t\t\tfor i in range(0\t\t\t\t,\t\t\t\t\tt_len - p_len + 1\t\t):\r\n\t\t\t\t\t\tif text_hash == p_hash and text[i : i + p_len] == pattern:\r\n\t\t\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\t\tif i == t_len - p_len:\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t# Calculate the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= (\r\n\t\t\t\t\t\t (text_hash - ord(text[i]\t\t) * modulus_power) * alphabet_size\r\n\t\t\t\t\t\t + ord(text[i + p_len]\t\t)\r\n\t\t\t\t\t\t) % modulus\r\n\t\t\treturn False\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> None:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t# Test 1)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"abc1abc12\"\"\"\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"alskfjaldsabc1abc1abc12k23adsfabcabc\"\"\"\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"alskfjaldsk23adsfabcabc\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t) and not rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 2)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"ABABX\"\"\"\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"ABABZABABYABABX\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 3)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"AAAB\"\"\"\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= \"\"\"ABAAAAAB\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 4)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"abcdabcy\"\"\"\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"abcxabcdabxabcdabcdabcy\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 5)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lü\"\"\"\r\n\t\t\tlowerCAmelCase_ :Optional[int] 
\t\t\t\t\t= \"\"\"Lรผsai\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lue\"\"\"\r\n\t\t\tassert not rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\tprint(\"\"\"Success.\"\"\"\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\ttest_rabin_karp()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":659,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport unittest\r\n\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel\r\nfrom diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device\r\n\r\n\r\nenable_full_determinism()\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= UNetaDModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=3\t, out_channels=3\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"AttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"AttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, )\r\n\t\t\t\t\t\treturn model\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.dummy_uncond_unet\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= ScoreSdeVeScheduler()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ScoreSdeVePipeline(unet=__A\t, scheduler=__A\t)\r\n\t\t\t\t\t\tsde_ve.to(__A\t)\r\n\t\t\t\t\t\tsde_ve.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= sde_ve(num_inference_steps=2\t, output_type=\"\"\"numpy\"\"\"\t, generator=__A\t).images\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= sde_ve(num_inference_steps=2\t, output_type=\"\"\"numpy\"\"\"\t, generator=__A\t, return_dict=__A\t)[\r\n\t\t\t\t\t\t 0\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= image_from_tuple[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 32, 32, 3)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\t\t\t\t\t\tassert np.abs(image_from_tuple_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\r\n\r\n\r\n\r\n@slow\r\n@require_torch\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"google/ncsnpp-church-256\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= UNetaDModel.from_pretrained(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= ScoreSdeVeScheduler.from_pretrained(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ScoreSdeVePipeline(unet=__A\t, 
scheduler=__A\t)\r\n\t\t\t\t\t\tsde_ve.to(__A\t)\r\n\t\t\t\t\t\tsde_ve.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= sde_ve(num_inference_steps=10\t, output_type=\"\"\"numpy\"\"\"\t, generator=__A\t).images\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (1, 256, 256, 3)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":1,"string":"1"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\nimport os\r\n\r\nimport evaluate\r\nimport torch\r\nfrom datasets import load_dataset\r\nfrom torch.optim import AdamW\r\nfrom torch.utils.data import DataLoader\r\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\r\n\r\nfrom accelerate import Accelerator, DistributedType\r\nfrom accelerate.local_sgd import LocalSGD\r\n\r\n\r\n########################################################################\r\n# This is a fully working simple example to use Accelerate\r\n# with LocalSGD, which is a method to synchronize model\r\n# parameters every K batches. It is different, but complementary\r\n# to gradient accumulation.\r\n#\r\n# This example trains a Bert base model on GLUE MRPC\r\n# in any of the following settings (with the same script):\r\n# - single CPU or single GPU\r\n# - multi GPUS (using PyTorch distributed mode)\r\n# - (multi) TPUs\r\n# - fp16 (mixed-precision) or fp32 (normal precision)\r\n#\r\n# To run it in each of these various modes, follow the instructions\r\n# in the readme for examples:\r\n# https://github.com/huggingface/accelerate/tree/main/examples\r\n#\r\n########################################################################\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 16\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 32\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Accelerator\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 1_6\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoTokenizer.from_pretrained(\"\"\"bert-base-cased\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= load_dataset(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tdef tokenize_function(lowercase__\t\t: int\t\t):\r\n\t\t\t\t\t\t# max_length=None => use the model max length (it's actually the default)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tokenizer(examples[\"\"\"sentence1\"\"\"]\t\t\t\t,\t\t\t\t\texamples[\"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\ttruncation=lowercase__\t\t\t\t,\t\t\t\t\tmax_length=lowercase__\t\t)\r\n\t\t\t\t\t\treturn outputs\r\n\r\n\t\t\t# Apply the method we just defined to all the examples in all the splits of the dataset\r\n\t\t\t# starting with the main process first:\r\n\t\t\twith accelerator.main_process_first():\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= datasets.map(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tbatched=lowercase__\t\t\t\t,\t\t\t\t\tremove_columns=[\"\"\"idx\"\"\", \"\"\"sentence1\"\"\", \"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# We also rename the 
'label' column to 'labels' which is the expected name for labels by the models of the\r\n\t\t\t# transformers library\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenized_datasets.rename_column(\"\"\"label\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"labels\"\"\"\t\t)\r\n\r\n\t\t\tdef collate_fn(lowercase__\t\t: Dict\t\t):\r\n\t\t\t\t\t\t# On TPU it's best to pad everything to the same length or training will be very slow.\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None\r\n\t\t\t\t\t\t# When using mixed precision we want round multiples of 8/16\r\n\t\t\t\t\t\tif accelerator.mixed_precision == \"fp8\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 1_6\r\n\t\t\t\t\t\telif accelerator.mixed_precision != \"no\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 8\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= None\r\n\r\n\t\t\t\t\t\treturn tokenizer.pad(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"longest\"\"\"\t\t\t\t,\t\t\t\t\tmax_length=lowercase__\t\t\t\t,\t\t\t\t\tpad_to_multiple_of=lowercase__\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Instantiate dataloaders.\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"train\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"validation\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\r\n\t\t\treturn train_dataloader, eval_dataloader\r\n\r\n\r\n# For testing only\r\nif os.environ.get('TESTING_MOCKED_DATALOADERS', None) == \"1\":\r\n\t\t\t\tfrom accelerate.test_utils.training import mocked_dataloaders\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= mocked_dataloaders # noqa: F811\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[int]\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif os.environ.get(\"\"\"TESTING_MOCKED_DATALOADERS\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t) == \"1\":\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 2\r\n\t\t\t# New Code #\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= int(args.gradient_accumulation_steps\t\t)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= int(args.local_sgd_steps\t\t)\r\n\t\t\t# Initialize accelerator\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= Accelerator(\r\n\t\t\t cpu=args.cpu\t\t\t\t,\t\t\t\t\tmixed_precision=args.mixed_precision\t\t\t\t,\t\t\t\t\tgradient_accumulation_steps=lowercase__\t\t)\r\n\t\t\tif accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:\r\n\t\t\t\t\t\traise NotImplementedError(\"\"\"LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)\"\"\"\t\t)\r\n\t\t\t# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config[\"\"\"lr\"\"\"]\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"num_epochs\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= int(config[\"\"\"seed\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"batch_size\"\"\"]\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] 
\t\t\t\t\t= evaluate.load(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tset_seed(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= get_dataloaders(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t# Instantiate the model (we build the model here so that the seed also control new weights initialization)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoModelForSequenceClassification.from_pretrained(\"\"\"bert-base-cased\"\"\"\t\t\t\t,\t\t\t\t\treturn_dict=lowercase__\t\t)\r\n\r\n\t\t\t# We could avoid this line since the accelerator is set with `device_placement=True` (default value).\r\n\t\t\t# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\r\n\t\t\t# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= model.to(accelerator.device\t\t)\r\n\r\n\t\t\t# Instantiate optimizer\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= AdamW(params=model.parameters()\t\t\t\t,\t\t\t\t\tlr=lowercase__\t\t)\r\n\r\n\t\t\t# Instantiate scheduler\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= get_linear_schedule_with_warmup(\r\n\t\t\t optimizer=lowercase__\t\t\t\t,\t\t\t\t\tnum_warmup_steps=1_0_0\t\t\t\t,\t\t\t\t\tnum_training_steps=(len(lowercase__\t\t) * num_epochs)\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Prepare everything\r\n\t\t\t# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\r\n\t\t\t# prepare method.\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Now we train the model\r\n\t\t\tfor epoch in range(lowercase__\t\t):\r\n\t\t\t\t\t\tmodel.train()\r\n\t\t\t\t\t\twith LocalSGD(\r\n\t\t\t\t\t\t accelerator=lowercase__\t\t\t\t,\t\t\t\t\tmodel=lowercase__\t\t\t\t,\t\t\t\t\tlocal_sgd_steps=lowercase__\t\t\t\t,\t\t\t\t\tenabled=local_sgd_steps is not None\t\t) as local_sgd:\r\n\t\t\t\t\t\t\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\t\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t# New code #\r\n\t\t\t\t\t\t\t\t\t\t\t\t# We use the new `accumulate` context manager to perform gradient accumulation\r\n\t\t\t\t\t\t\t\t\t\t\t\t# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.\r\n\t\t\t\t\t\t\t\t\t\t\t\twith accelerator.accumulate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= output.loss\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\taccelerator.backward(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlr_scheduler.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer.zero_grad()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# LocalSGD-specific line\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlocal_sgd.step()\r\n\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\t# We 
could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.logits.argmax(dim=-1\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accelerator.gather_for_metrics((predictions, batch[\"\"\"labels\"\"\"])\t\t)\r\n\t\t\t\t\t\t\t\t\tmetric.add_batch(\r\n\t\t\t\t\t\t\t\t\t predictions=lowercase__\t\t\t\t,\t\t\t\t\treferences=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= metric.compute()\r\n\t\t\t\t\t\t# Use accelerator.print to print only on the main process.\r\n\t\t\t\t\t\taccelerator.print(f\"\"\"epoch {epoch}:\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Tuple:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= argparse.ArgumentParser(description=\"\"\"Simple example of training script.\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--mixed_precision\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=lowercase__\t\t\t\t,\t\t\t\t\tchoices=[\"\"\"no\"\"\", \"\"\"fp16\"\"\", \"\"\"bf16\"\"\", \"\"\"fp8\"\"\"]\t\t\t\t,\t\t\t\t\thelp=\"\"\"Whether to use mixed precision. Choose\"\"\"\r\n\t\t\t \"\"\"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.\"\"\"\r\n\t\t\t \"\"\"and an Nvidia Ampere GPU.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\t# New Code #\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--gradient_accumulation_steps\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=1\t\t\t\t,\t\t\t\t\thelp=\"\"\"The number of minibatches to be ran before gradients are accumulated.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--local_sgd_steps\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=8\t\t\t\t,\t\t\t\t\thelp=\"\"\"Number of local SGD steps or None to disable local SGD\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\"\"\"--cpu\"\"\"\t\t\t\t,\t\t\t\t\taction=\"\"\"store_true\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"If passed, will train on the CPU.\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= parser.parse_args()\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= {\"\"\"lr\"\"\": 2E-5, \"\"\"num_epochs\"\"\": 3, \"\"\"seed\"\"\": 4_2, \"\"\"batch_size\"\"\": 1_6}\r\n\t\t\ttraining_function(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":660,"cells":{"code":{"kind":"string","value":"\n\"\"\"simple docstring\"\"\"\n\n\n\n\n\n\nfrom __future__ import annotations\n\nimport unittest\n\nfrom transformers import XGLMConfig, XGLMTokenizer, is_tf_available\nfrom transformers.testing_utils import require_tf, slow\n\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask\nfrom ...test_pipeline_mixin import PipelineTesterMixin\n\n\nif is_tf_available():\n\t\t\t\timport tensorflow as tf\n\n\t\t\t\tfrom transformers.models.xglm.modeling_tf_xglm import (\n\t\t\t\t TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,\n\t\t\t\t 
TFXGLMForCausalLM,\n\t\t\t\t TFXGLMModel,\n\t\t\t\t)\n\n\n\n\n@require_tf\nclass _SCREAMING_SNAKE_CASE :\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tXGLMConfig\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\t{}\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\t\"gelu\"\n\n\n\n\n\n\n\t\t\tdef __init__( self\t, __A\t, __A=14\t, __A=7\t, __A=True\t, __A=True\t, __A=True\t, __A=99\t, __A=32\t, __A=2\t, __A=4\t, __A=37\t, __A=\"gelu\"\t, __A=0.1\t, __A=0.1\t, __A=512\t, __A=0.0_2\t, ) ->\t\t\t\t\tUnion[str, Any]:\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= parent\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= batch_size\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= seq_length\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= is_training\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= use_input_mask\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= use_labels\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= vocab_size\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= d_model\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= num_hidden_layers\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= num_attention_heads\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ffn_dim\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= activation_function\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= activation_dropout\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= attention_dropout\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= max_position_embeddings\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= initializer_range\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= None\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 0\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 2\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 1\n\n\n\n\n\n\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\n\t\t\t\t\t\treturn XGLMConfig.from_pretrained(\"\"\"facebook/xglm-564M\"\"\"\t)\n\n\n\n\n\n\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tf.clip_by_value(\n\t\t\t\t\t\t ids_tensor([self.batch_size, self.seq_length]\t, self.vocab_size\t)\t, clip_value_min=0\t, clip_value_max=3\t)\n\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= None\n\t\t\t\t\t\tif self.use_input_mask:\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= random_attention_mask([self.batch_size, self.seq_length]\t)\n\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_config()\n\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= floats_tensor([self.num_hidden_layers, self.num_attention_heads]\t, 2\t)\n\n\t\t\t\t\t\treturn (\n\t\t\t\t\t\t config,\n\t\t\t\t\t\t input_ids,\n\t\t\t\t\t\t input_mask,\n\t\t\t\t\t\t head_mask,\n\t\t\t\t\t\t)\n\n\n\n\n\n\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\n\t\t\t\t\t\treturn XGLMConfig(\n\t\t\t\t\t\t vocab_size=self.vocab_size\t, d_model=self.hidden_size\t, num_layers=self.num_hidden_layers\t, attention_heads=self.num_attention_heads\t, ffn_dim=self.ffn_dim\t, activation_function=self.activation_function\t, activation_dropout=self.activation_dropout\t, attention_dropout=self.attention_dropout\t, max_position_embeddings=self.max_position_embeddings\t, initializer_range=self.initializer_range\t, use_cache=__a\t, bos_token_id=self.bos_token_id\t, eos_token_id=self.eos_token_id\t, pad_token_id=self.pad_token_id\t, return_dict=__a\t, )\n\n\n\n\n\n\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\n\t\t\t\t\t\tlowerCAmelCase_ 
:List[str] \t\t\t\t\t= self.prepare_config_and_inputs()\n\n\t\t\t\t\t\t(\n\t\t\t\t\t\t lowerCAmelCase_ \n\t\t\t\t\t\t) :List[Any] \t\t\t\t\t= config_and_inputs\n\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\n\t\t\t\t\t\t 'input_ids': input_ids,\n\t\t\t\t\t\t 'head_mask': head_mask,\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treturn config, inputs_dict\n\n\n\n\n@require_tf\nclass _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ,\t\t\tSCREAMING_SNAKE_CASE_ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\t(TFXGLMForCausalLM,) if is_tf_available() else ()\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t(\n\t\t\t {\"feature-extraction\": TFXGLMModel, \"text-generation\": TFXGLMForCausalLM} if is_tf_available() else {}\n\t\t\t)\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tFalse\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\tFalse\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\tFalse\n\n\n\n\n\n\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= TFXGLMModelTester(self\t)\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= ConfigTester(self\t, config_class=__a\t, n_embd=37\t)\n\n\n\n\n\n\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\n\t\t\t\t\t\tself.config_tester.run_common_tests()\n\n\n\n\n\n\n\t\t\t@slow\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\n\t\t\t\t\t\tfor model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= TFXGLMModel.from_pretrained(__a\t)\n\t\t\t\t\t\t\t\t\tself.assertIsNotNone(__a\t)\n\n\n\n\n\n\n\t\t\t@unittest.skip(reason=\"\"\"Currently, model embeddings are going to undergo a major refactor.\"\"\"\t)\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\n\t\t\t\t\t\tsuper().test_resize_token_embeddings()\n\n\n\n\n@require_tf\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\n\n\n\n\n\n\n\t\t\t@slow\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=True\t) ->\t\t\t\t\tAny:\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= TFXGLMForCausalLM.from_pretrained(\"\"\"facebook/xglm-564M\"\"\"\t)\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tf.convert_to_tensor([[2, 268, 9865]]\t, dtype=tf.intaa\t) # The dog\n\t\t\t\t\t\t# The dog is a very friendly dog. 
He is very affectionate and loves to play with other\n\t\t\t\t\t\t# fmt: off\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]\n\t\t\t\t\t\t# fmt: on\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= model.generate(__a\t, do_sample=__a\t, num_beams=1\t)\n\t\t\t\t\t\tif verify_outputs:\n\t\t\t\t\t\t\t\t\tself.assertListEqual(output_ids[0].numpy().tolist()\t, __a\t)\n\n\n\n\n\n\n\t\t\t@slow\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= XGLMTokenizer.from_pretrained(\"\"\"facebook/xglm-564M\"\"\"\t)\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= TFXGLMForCausalLM.from_pretrained(\"\"\"facebook/xglm-564M\"\"\"\t)\n\n\t\t\t\t\t\ttf.random.set_seed(0\t)\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer(\"\"\"Today is a nice day and\"\"\"\t, return_tensors=\"\"\"tf\"\"\"\t)\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenized.input_ids\n\t\t\t\t\t\t# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)\n\t\t\t\t\t\twith tf.device(\"\"\":/CPU:0\"\"\"\t):\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model.generate(__a\t, do_sample=__a\t, seed=[7, 0]\t)\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer.decode(output_ids[0]\t, skip_special_tokens=__a\t)\n\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\n\t\t\t\t\t\t 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'\n\t\t\t\t\t\t)\n\t\t\t\t\t\tself.assertEqual(__a\t, __a\t)\n\n\n\n\n\n\n\t\t\t@slow\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= TFXGLMForCausalLM.from_pretrained(\"\"\"facebook/xglm-564M\"\"\"\t)\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= XGLMTokenizer.from_pretrained(\"\"\"facebook/xglm-564M\"\"\"\t)\n\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 'left'\n\n\t\t\t\t\t\t# use different length sentences to test batching\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= [\n\t\t\t\t\t\t 'This is an extremelly long sentence that only exists to test the ability of the model to cope with '\n\t\t\t\t\t\t 'left-padding, such as in batched generation. The output for the sequence below should be the same '\n\t\t\t\t\t\t 'regardless of whether left padding is applied or not. 
When',\n\t\t\t\t\t\t 'Hello, my dog is a little',\n\t\t\t\t\t\t]\n\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer(__a\t, return_tensors=\"\"\"tf\"\"\"\t, padding=__a\t)\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= inputs['input_ids']\n\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= model.generate(input_ids=__a\t, attention_mask=inputs[\"\"\"attention_mask\"\"\"]\t, max_new_tokens=12\t)\n\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(sentences[0]\t, return_tensors=\"\"\"tf\"\"\"\t).input_ids\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= model.generate(input_ids=__a\t, max_new_tokens=12\t)\n\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(sentences[1]\t, return_tensors=\"\"\"tf\"\"\"\t).input_ids\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= model.generate(input_ids=__a\t, max_new_tokens=12\t)\n\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tokenizer.batch_decode(__a\t, skip_special_tokens=__a\t)\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tokenizer.decode(output_non_padded[0]\t, skip_special_tokens=__a\t)\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer.decode(output_padded[0]\t, skip_special_tokens=__a\t)\n\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [\n\t\t\t\t\t\t 'This is an extremelly long sentence that only exists to test the ability of the model to cope with '\n\t\t\t\t\t\t 'left-padding, such as in batched generation. The output for the sequence below should be the same '\n\t\t\t\t\t\t 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '\n\t\t\t\t\t\t 'a single',\n\t\t\t\t\t\t 'Hello, my dog is a little bit of a shy one, but he is very friendly',\n\t\t\t\t\t\t]\n\t\t\t\t\t\tself.assertListEqual(__a\t, __a\t)\n\t\t\t\t\t\tself.assertListEqual(__a\t, [non_padded_sentence, padded_sentence]\t)\n\n\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":350,"string":"350"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\nimport json\r\nimport os\r\n\r\nimport evaluate\r\nimport torch\r\nfrom datasets import load_dataset\r\nfrom torch.optim import AdamW\r\nfrom torch.utils.data import DataLoader\r\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\r\n\r\nfrom accelerate import Accelerator, DistributedType\r\nfrom accelerate.utils.deepspeed import DummyOptim, DummyScheduler\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 16\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 32\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Accelerator\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 1_6\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str = \"bert-base-cased\"\t\t) -> Union[str, Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoTokenizer.from_pretrained(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= load_dataset(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tdef tokenize_function(lowercase__\t\t: List[str]\t\t):\r\n\t\t\t\t\t\t# max_length=None => use the model max length (it's actually the default)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer(examples[\"\"\"sentence1\"\"\"]\t\t\t\t,\t\t\t\t\texamples[\"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\ttruncation=lowercase__\t\t\t\t,\t\t\t\t\tmax_length=lowercase__\t\t)\r\n\t\t\t\t\t\treturn outputs\r\n\r\n\t\t\t# Apply the method we just defined to all 
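# --- Illustrative sketch, not part of the original test: why the batching test
# above sets `tokenizer.padding_side = "left"`. Decoder-only models append new
# tokens after the last input position, so padding must go on the left or it
# would land between the prompt and the continuation. Same checkpoint as above.
from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
tok.padding_side = "left"
batch = tok(["Hello, my dog is a little", "Hi"], padding=True, return_tensors="tf")
# the short prompt gets pad tokens on the left; its attention_mask starts with
# zeros, and generate() continues cleanly from the rightmost real token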
the examples in all the splits of the dataset\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= datasets.map(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tbatched=lowercase__\t\t\t\t,\t\t\t\t\tremove_columns=[\"\"\"idx\"\"\", \"\"\"sentence1\"\"\", \"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\tload_from_cache_file=lowercase__\t\t)\r\n\r\n\t\t\t# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\r\n\t\t\t# transformers library\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenized_datasets.rename_column(\"\"\"label\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"labels\"\"\"\t\t)\r\n\r\n\t\t\tdef collate_fn(lowercase__\t\t: Union[str, Any]\t\t):\r\n\t\t\t\t\t\t# On TPU it's best to pad everything to the same length or training will be very slow.\r\n\t\t\t\t\t\tif accelerator.distributed_type == DistributedType.TPU:\r\n\t\t\t\t\t\t\t\t\treturn tokenizer.pad(lowercase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"max_length\"\"\"\t\t\t\t,\t\t\t\t\tmax_length=1_2_8\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t)\r\n\t\t\t\t\t\treturn tokenizer.pad(lowercase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"longest\"\"\"\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t)\r\n\r\n\t\t\t# Instantiate dataloaders.\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"train\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"validation\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=lowercase__\t\t\t\t,\t\t\t\t\tcollate_fn=lowercase__\t\t\t\t,\t\t\t\t\tbatch_size=lowercase__\t\t)\r\n\r\n\t\t\treturn train_dataloader, eval_dataloader\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Optional[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Union[str, Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> List[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tmodel.eval()\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 0\r\n\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.logits.argmax(dim=-1\t\t)\r\n\t\t\t\t\t\t# It is slightly faster to call this once, than multiple times\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= accelerator.gather(\r\n\t\t\t\t\t\t (predictions, batch[\"\"\"labels\"\"\"])\t\t) # If we are in a multiprocess environment, the last batch has duplicates\r\n\t\t\t\t\t\tif accelerator.use_distributed:\r\n\t\t\t\t\t\t\t\t\tif step == len(lowercase__\t\t) - 1:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= predictions[: len(eval_dataloader.dataset\t\t) - samples_seen]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= references[: len(eval_dataloader.dataset\t\t) - samples_seen]\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tsamples_seen += references.shape[0]\r\n\t\t\t\t\t\tmetric.add_batch(\r\n\t\t\t\t\t\t predictions=lowercase__\t\t\t\t,\t\t\t\t\treferences=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= metric.compute()\r\n\t\t\treturn 
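# --- Note added for clarity, not part of the original script: the slicing above
# is the manual form of what `accelerator.gather_for_metrics(...)` automates.
# Distributed dataloaders pad so that every rank runs the same number of steps,
# so the final gathered batch may contain duplicated samples. Worked example:
# a dataset of 17 samples, 4 processes, per-process batch size 2 -> 3 steps of
# 4 * 2 = 8 gathered rows = 24 rows total, of which only the first 17 are real;
# trimming to `len(eval_dataloader.dataset)` drops the 7 duplicates.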
eval_metric[\"accuracy\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t) -> Any:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= Accelerator()\r\n\r\n\t\t\t# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config[\"\"\"lr\"\"\"]\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"num_epochs\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= int(config[\"\"\"seed\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(config[\"\"\"batch_size\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= args.model_name_or_path\r\n\r\n\t\t\tset_seed(lowercase__\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Dict \t\t\t\t\t= get_dataloaders(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Instantiate the model (we build the model here so that the seed also control new weights initialization)\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= AutoModelForSequenceClassification.from_pretrained(lowercase__\t\t\t\t,\t\t\t\t\treturn_dict=lowercase__\t\t)\r\n\r\n\t\t\t# Instantiate optimizer\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= (\r\n\t\t\t AdamW\r\n\t\t\t if accelerator.state.deepspeed_plugin is None\r\n\t\t\t or \"\"\"optimizer\"\"\" not in accelerator.state.deepspeed_plugin.deepspeed_config\r\n\t\t\t else DummyOptim\r\n\t\t\t)\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= optimizer_cls(params=model.parameters()\t\t\t\t,\t\t\t\t\tlr=lowercase__\t\t)\r\n\r\n\t\t\tif accelerator.state.deepspeed_plugin is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accelerator.state.deepspeed_plugin.deepspeed_config[\r\n\t\t\t\t\t\t \"\"\"gradient_accumulation_steps\"\"\"\r\n\t\t\t\t\t\t]\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 1\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (len(lowercase__\t\t) * num_epochs) // gradient_accumulation_steps\r\n\r\n\t\t\t# Instantiate scheduler\r\n\t\t\tif (\r\n\t\t\t accelerator.state.deepspeed_plugin is None\r\n\t\t\t or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\r\n\t\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= get_linear_schedule_with_warmup(\r\n\t\t\t\t\t\t optimizer=lowercase__\t\t\t\t,\t\t\t\t\tnum_warmup_steps=0\t\t\t\t,\t\t\t\t\tnum_training_steps=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= DummyScheduler(lowercase__\t\t\t\t,\t\t\t\t\ttotal_num_steps=lowercase__\t\t\t\t,\t\t\t\t\twarmup_num_steps=0\t\t)\r\n\r\n\t\t\t# Prepare everything\r\n\t\t\t# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\r\n\t\t\t# prepare method.\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# We need to keep track of how many total steps we have iterated over\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\t# We also need to keep track of the stating epoch so files are named properly\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 
evaluate.load(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= num_epochs\r\n\r\n\t\t\tif args.partial_train_epoch is not None:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= args.partial_train_epoch\r\n\r\n\t\t\tif args.resume_from_checkpoint:\r\n\t\t\t\t\t\taccelerator.load_state(args.resume_from_checkpoint\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= args.resume_from_checkpoint.split(\"\"\"epoch_\"\"\"\t\t)[1]\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\tfor char in epoch_string:\r\n\t\t\t\t\t\t\t\t\tif char.isdigit():\r\n\t\t\t\t\t\t\t\t\t\t\t\tstate_epoch_num += char\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= int(lowercase__\t\t) + 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= evaluation_loop(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\taccelerator.print(\"\"\"resumed checkpoint performance:\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\taccelerator.print(\"\"\"resumed checkpoint's scheduler's lr:\"\"\"\t\t\t\t,\t\t\t\t\tlr_scheduler.get_lr()[0]\t\t)\r\n\t\t\t\t\t\taccelerator.print(\"\"\"resumed optimizers's lr:\"\"\"\t\t\t\t,\t\t\t\t\toptimizer.param_groups[0][\"\"\"lr\"\"\"]\t\t)\r\n\t\t\t\t\t\twith open(os.path.join(args.output_dir\t\t\t\t,\t\t\t\t\tf\"\"\"state_{starting_epoch-1}.json\"\"\"\t\t)\t\t\t\t,\t\t\t\t\t\"\"\"r\"\"\"\t\t) as f:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= json.load(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tassert resumed_state[\"accuracy\"] == accuracy, \"Accuracy mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\tassert (\r\n\t\t\t\t\t\t\t\t\t resumed_state[\"lr\"] == lr_scheduler.get_lr()[0]\r\n\t\t\t\t\t\t\t\t\t), \"Scheduler learning rate mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\tassert (\r\n\t\t\t\t\t\t\t\t\t resumed_state[\"optimizer_lr\"] == optimizer.param_groups[0][\"lr\"]\r\n\t\t\t\t\t\t\t\t\t), \"Optimizer learning rate mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\tassert resumed_state[\"epoch\"] == starting_epoch - 1, \"Epoch mismatch, loading from checkpoint failed\"\r\n\t\t\t\t\t\t\t\t\treturn\r\n\r\n # Now we train the model\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {}\r\n\t\t\tfor epoch in range(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t):\r\n\t\t\t\t\t\tmodel.train()\r\n\t\t\t\t\t\tfor step, batch in enumerate(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(**lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= outputs.loss\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= loss / gradient_accumulation_steps\r\n\t\t\t\t\t\t\t\t\taccelerator.backward(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tif step % gradient_accumulation_steps == 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\toptimizer.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\tlr_scheduler.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\toptimizer.zero_grad()\r\n\r\n\t\t\t\t\t\t\t\t\toverall_step += 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= f\"\"\"epoch_{epoch}\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= os.path.join(args.output_dir\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\taccelerator.save_state(lowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 
evaluation_loop(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= accuracy\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= lr_scheduler.get_lr()[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= optimizer.param_groups[0][\"\"\"lr\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= epoch\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= overall_step\r\n\t\t\t\t\t\taccelerator.print(f\"\"\"epoch {epoch}:\"\"\"\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t\t\t\taccelerator.wait_for_everyone()\r\n\t\t\t\t\t\tif accelerator.is_main_process:\r\n\t\t\t\t\t\t\t\t\twith open(os.path.join(args.output_dir\t\t\t\t,\t\t\t\t\tf\"\"\"state_{epoch}.json\"\"\"\t\t)\t\t\t\t,\t\t\t\t\t\"\"\"w\"\"\"\t\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tjson.dump(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= argparse.ArgumentParser(description=\"\"\"Simple example of training script tracking peak GPU memory usage.\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--model_name_or_path\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=\"\"\"bert-base-cased\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"Path to pretrained model or model identifier from huggingface.co/models.\"\"\"\t\t\t\t,\t\t\t\t\trequired=lowercase__\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--output_dir\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=\"\"\".\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"Optional save directory where all checkpoint folders will be stored. Default is the current working directory.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--resume_from_checkpoint\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=lowercase__\t\t\t\t,\t\t\t\t\thelp=\"\"\"If the training should continue from a checkpoint folder.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--partial_train_epoch\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=lowercase__\t\t\t\t,\t\t\t\t\thelp=\"\"\"If passed, the training will stop after this number of epochs.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--num_epochs\"\"\"\t\t\t\t,\t\t\t\t\ttype=lowercase__\t\t\t\t,\t\t\t\t\tdefault=2\t\t\t\t,\t\t\t\t\thelp=\"\"\"Number of train epochs.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= parser.parse_args()\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\"\"\"lr\"\"\": 2E-5, \"\"\"num_epochs\"\"\": args.num_epochs, \"\"\"seed\"\"\": 4_2, \"\"\"batch_size\"\"\": 1_6}\r\n\r\n\t\t\ttraining_function(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":661,"cells":{"code":{"kind":"string","value":"\r\nimport gc\r\nimport random\r\nimport unittest\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel\r\n\r\nfrom diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline\r\nfrom diffusers.pipelines.shap_e import ShapERenderer\r\nfrom diffusers.utils import floats_tensor, load_image, load_numpy, 
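# --- Illustrative sketch, not part of the original script: the checkpoint
# naming contract the resume logic above depends on. `accelerator.save_state`
# and `accelerator.load_state` restore model/optimizer/scheduler together; the
# epoch index only survives through the folder name, hence the digit parsing.
import os

output_dir, epoch = "/tmp/run", 3  # example values
ckpt = os.path.join(output_dir, f"epoch_{epoch}")
# during training:  accelerator.save_state(ckpt)
digits = "".join(c for c in ckpt.split("epoch_")[1] if c.isdigit())
starting_epoch = int(digits) + 1   # same arithmetic as the script above
assert starting_epoch == 4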
slow\r\nfrom diffusers.utils.testing_utils import require_torch_gpu, torch_device\r\n\r\nfrom ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( _UpperCAmelCase ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\tShapEImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\t[\"image\"]\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t[\"image\"]\r\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\t[\r\n\t\t\t \"num_images_per_prompt\",\r\n\t\t\t \"num_inference_steps\",\r\n\t\t\t \"generator\",\r\n\t\t\t \"latents\",\r\n\t\t\t \"guidance_scale\",\r\n\t\t\t \"frame_size\",\r\n\t\t\t \"output_type\",\r\n\t\t\t \"return_dict\",\r\n\t\t\t]\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\tFalse\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\treturn 32\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn 32\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn self.time_input_dim * 4\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn 8\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CLIPVisionConfig(\r\n\t\t\t\t\t\t hidden_size=self.text_embedder_hidden_size\t, image_size=64\t, projection_dim=self.text_embedder_hidden_size\t, intermediate_size=37\t, num_attention_heads=4\t, num_channels=3\t, num_hidden_layers=5\t, patch_size=1\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= CLIPVisionModel(_UpperCAmelCase\t)\r\n\t\t\t\t\t\treturn model\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= CLIPImageProcessor(\r\n\t\t\t\t\t\t crop_size=224\t, do_center_crop=_UpperCAmelCase\t, do_normalize=_UpperCAmelCase\t, do_resize=_UpperCAmelCase\t, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]\t, image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]\t, resample=3\t, size=224\t, )\r\n\r\n\t\t\t\t\t\treturn image_processor\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t\t\t\t '''num_attention_heads''': 2,\r\n\t\t\t\t\t\t '''attention_head_dim''': 16,\r\n\t\t\t\t\t\t '''embedding_dim''': self.time_input_dim,\r\n\t\t\t\t\t\t '''num_embeddings''': 32,\r\n\t\t\t\t\t\t '''embedding_proj_dim''': self.text_embedder_hidden_size,\r\n\t\t\t\t\t\t '''time_embed_dim''': self.time_embed_dim,\r\n\t\t\t\t\t\t '''num_layers''': 1,\r\n\t\t\t\t\t\t '''clip_embed_dim''': self.time_input_dim * 2,\r\n\t\t\t\t\t\t '''additional_embeddings''': 0,\r\n\t\t\t\t\t\t '''time_embed_act_fn''': '''gelu''',\r\n\t\t\t\t\t\t '''norm_in_type''': '''layer''',\r\n\t\t\t\t\t\t '''embedding_proj_norm_type''': '''layer''',\r\n\t\t\t\t\t\t '''encoder_hid_proj_type''': None,\r\n\t\t\t\t\t\t '''added_emb_type''': None,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 
PriorTransformer(**_UpperCAmelCase\t)\r\n\t\t\t\t\t\treturn model\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= {\r\n\t\t\t\t\t\t '''param_shapes''': (\r\n\t\t\t\t\t\t (self.renderer_dim, 93),\r\n\t\t\t\t\t\t (self.renderer_dim, 8),\r\n\t\t\t\t\t\t (self.renderer_dim, 8),\r\n\t\t\t\t\t\t (self.renderer_dim, 8),\r\n\t\t\t\t\t\t ),\r\n\t\t\t\t\t\t '''d_latent''': self.time_input_dim,\r\n\t\t\t\t\t\t '''d_hidden''': self.renderer_dim,\r\n\t\t\t\t\t\t '''n_output''': 12,\r\n\t\t\t\t\t\t '''background''': (\r\n\t\t\t\t\t\t 0.1,\r\n\t\t\t\t\t\t 0.1,\r\n\t\t\t\t\t\t 0.1,\r\n\t\t\t\t\t\t ),\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= ShapERenderer(**_UpperCAmelCase\t)\r\n\t\t\t\t\t\treturn model\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.dummy_prior\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.dummy_image_encoder\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.dummy_image_processor\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.dummy_renderer\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= HeunDiscreteScheduler(\r\n\t\t\t\t\t\t beta_schedule=\"\"\"exp\"\"\"\t, num_train_timesteps=1024\t, prediction_type=\"\"\"sample\"\"\"\t, use_karras_sigmas=_UpperCAmelCase\t, clip_sample=_UpperCAmelCase\t, clip_sample_range=1.0\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= {\r\n\t\t\t\t\t\t '''prior''': prior,\r\n\t\t\t\t\t\t '''image_encoder''': image_encoder,\r\n\t\t\t\t\t\t '''image_processor''': image_processor,\r\n\t\t\t\t\t\t '''renderer''': renderer,\r\n\t\t\t\t\t\t '''scheduler''': scheduler,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= floats_tensor((1, 3, 64, 64)\t, rng=random.Random(_UpperCAmelCase\t)\t).to(_UpperCAmelCase\t)\r\n\r\n\t\t\t\t\t\tif str(_UpperCAmelCase\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= torch.manual_seed(_UpperCAmelCase\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= torch.Generator(device=_UpperCAmelCase\t).manual_seed(_UpperCAmelCase\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= {\r\n\t\t\t\t\t\t '''image''': input_image,\r\n\t\t\t\t\t\t '''generator''': generator,\r\n\t\t\t\t\t\t '''num_inference_steps''': 1,\r\n\t\t\t\t\t\t '''frame_size''': 32,\r\n\t\t\t\t\t\t '''output_type''': '''np''',\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= '''cpu'''\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_dummy_components()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.pipeline_class(**_UpperCAmelCase\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= pipe.to(_UpperCAmelCase\t)\r\n\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=_UpperCAmelCase\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**self.get_dummy_inputs(_UpperCAmelCase\t)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= output.images[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= image[0, -3:, -3:, 
-1]\r\n\r\n\t\t\t\t\t\tassert image.shape == (20, 32, 32, 3)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= np.array(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t 0.0_0_0_3_9_2_1_6,\r\n\t\t\t\t\t\t 0.0_0_0_3_9_2_1_6,\r\n\t\t\t\t\t\t 0.0_0_0_3_9_2_1_6,\r\n\t\t\t\t\t\t 0.0_0_0_3_9_2_1_6,\r\n\t\t\t\t\t\t 0.0_0_0_3_9_2_1_6,\r\n\t\t\t\t\t\t 0.0_0_0_3_9_2_1_6,\r\n\t\t\t\t\t\t 0.0_0_0_3_9_2_1_6,\r\n\t\t\t\t\t\t 0.0_0_0_3_9_2_1_6,\r\n\t\t\t\t\t\t 0.0_0_0_3_9_2_1_6,\r\n\t\t\t\t\t\t ]\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t).max() < 1E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches\r\n\t\t\t\t\t\tself._test_inference_batch_consistent(batch_sizes=[1, 2]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch_device == '''cpu'''\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= True\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(\r\n\t\t\t\t\t\t batch_size=2\t, test_max_difference=_UpperCAmelCase\t, relax_max_difference=_UpperCAmelCase\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.pipeline_class(**_UpperCAmelCase\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= pipe.to(_UpperCAmelCase\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=_UpperCAmelCase\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_dummy_inputs(_UpperCAmelCase\t)\r\n\r\n\t\t\t\t\t\tfor key in inputs.keys():\r\n\t\t\t\t\t\t\t\t\tif key in self.batch_params:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= batch_size * [inputs[key]]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= pipe(**_UpperCAmelCase\t, num_images_per_prompt=_UpperCAmelCase\t)[0]\r\n\r\n\t\t\t\t\t\tassert images.shape[0] == batch_size * num_images_per_prompt\r\n\r\n\r\n\r\n\r\n@slow\r\n@require_torch_gpu\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\t# clean up the VRAM after each test\r\n\t\t\t\t\t\tsuper().tearDown()\r\n\t\t\t\t\t\tgc.collect()\r\n\t\t\t\t\t\ttorch.cuda.empty_cache()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\"\" \"\"\"/shap_e/corgi.png\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= load_numpy(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\"\"\r\n\t\t\t\t\t\t \"\"\"/shap_e/test_shap_e_img2img_out.npy\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= ShapEImgaImgPipeline.from_pretrained(\"\"\"openai/shap-e-img2img\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= pipe.to(_UpperCAmelCase\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=_UpperCAmelCase\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 
torch.Generator(device=_UpperCAmelCase\t).manual_seed(0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= pipe(\r\n\t\t\t\t\t\t _UpperCAmelCase\t, generator=_UpperCAmelCase\t, guidance_scale=3.0\t, num_inference_steps=64\t, frame_size=64\t, output_type=\"\"\"np\"\"\"\t, ).images[0]\r\n\r\n\t\t\t\t\t\tassert images.shape == (20, 64, 64, 3)\r\n\r\n\t\t\t\t\t\tassert_mean_pixel_difference(_UpperCAmelCase\t, _UpperCAmelCase\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":351,"string":"351"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport baseaa\r\nimport io\r\nimport json\r\nimport os\r\nfrom copy import deepcopy\r\n\r\nfrom ..optimizer import AcceleratedOptimizer\r\nfrom ..scheduler import AcceleratedScheduler\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tif isinstance(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\t# Don't modify user's data should they want to reuse it (e.g. in tests), because once we\r\n\t\t\t\t\t\t\t\t\t# modified it, it will not be accepted here again, since `auto` values would have been overridden\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= deepcopy(__A\t)\r\n\t\t\t\t\t\telif os.path.exists(__A\t):\r\n\t\t\t\t\t\t\t\t\twith io.open(__A\t, \"\"\"r\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= json.load(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= baseaa.urlsafe_baadecode(__A\t).decode(\"\"\"utf-8\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= json.loads(__A\t)\r\n\t\t\t\t\t\t\t\t\texcept (UnicodeDecodeError, AttributeError, ValueError):\r\n\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t\t\t\t f\"\"\"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. 
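# --- Illustrative sketch, not part of the original module: the three config
# forms the constructor above accepts. `baseaa.urlsafe_baadecode` is this
# dump's rewritten spelling of `base64.urlsafe_b64decode`, used directly here.
# The config contents are a minimal example, not a recommended setup.
import base64
import json

cfg = {"zero_optimization": {"stage": 2}, "train_batch_size": "auto"}
as_dict = cfg                                   # 1) an in-memory dict
as_path = "/tmp/ds_config.json"                 # 2) a path to a JSON file
as_blob = base64.urlsafe_b64encode(json.dumps(cfg).encode("utf-8"))  # 3) base64
assert json.loads(base64.urlsafe_b64decode(as_blob).decode("utf-8")) == cfg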
Received: {config_file_or_dict}\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= config\r\n\r\n\t\t\t\t\t\tself.set_stage_and_offload()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# zero stage - this is done as early as possible, before model is created, to allow\r\n\t\t\t\t\t\t# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object\r\n\t\t\t\t\t\t# during ``zero.Init()`` which needs to know the dtype, and some other hparams.\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_value(\"\"\"zero_optimization.stage\"\"\"\t, -1\t)\r\n\r\n\t\t\t\t\t\t# offload\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= False\r\n\t\t\t\t\t\tif self.is_zeroa() or self.is_zeroa():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= set([\"\"\"cpu\"\"\", \"\"\"nvme\"\"\"]\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= set(\r\n\t\t\t\t\t\t\t\t\t [\r\n\t\t\t\t\t\t\t\t\t self.get_value(\"\"\"zero_optimization.offload_optimizer.device\"\"\"\t),\r\n\t\t\t\t\t\t\t\t\t self.get_value(\"\"\"zero_optimization.offload_param.device\"\"\"\t),\r\n\t\t\t\t\t\t\t\t\t ]\t)\r\n\t\t\t\t\t\t\t\t\tif len(offload_devices & offload_devices_valid\t) > 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= True\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.config\r\n\r\n\t\t\t\t\t\t# find the config node of interest if it exists\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= ds_key_long.split(\"\"\".\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= nodes.pop()\r\n\t\t\t\t\t\tfor node in nodes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= config.get(__A\t)\r\n\t\t\t\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\treturn None, ds_key\r\n\r\n\t\t\t\t\t\treturn config, ds_key\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=None\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.find_config_node(__A\t)\r\n\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\treturn default\r\n\t\t\t\t\t\treturn config.get(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=False\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.config\r\n\r\n\t\t\t\t\t\t# find the config node of interest if it exists\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ds_key_long.split(\"\"\".\"\"\"\t)\r\n\t\t\t\t\t\tfor node in nodes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= config.get(__A\t)\r\n\t\t\t\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tif must_exist:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(f\"\"\"Can't find {ds_key_long} entry in the config: {self.config}\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn\r\n\r\n # if found remove it\r\n\t\t\t\t\t\tif parent_config is not None:\r\n\t\t\t\t\t\t\t\t\tparent_config.pop(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.get_value(__A\t)\r\n\t\t\t\t\t\treturn False if value is None else bool(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef 
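# --- Illustrative sketch, not part of the original module: the dotted-key
# lookup implemented above, reduced to plain dict traversal. Example data only.
def get_value(config, ds_key_long, default=None):
    *nodes, ds_key = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(ds_key, default)

cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value(cfg, "zero_optimization.stage") == 3
assert get_value(cfg, "zero_optimization.offload_param.device") == "cpu"
assert get_value(cfg, "optimizer.params.lr") is None  # missing node -> default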
\t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_value(__A\t)\r\n\t\t\t\t\t\treturn False if value is None else not bool(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\treturn self._stage == 2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn self._stage == 3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn self._offload\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= engine\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, **__A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# runs backpropagation and handles mixed precision\r\n\t\t\t\t\t\tself.engine.backward(__A\t, **__A\t)\r\n\r\n\t\t\t\t\t\t# Deepspeed's `engine.step` performs the following operations:\r\n\t\t\t\t\t\t# - gradient accumulation check\r\n\t\t\t\t\t\t# - gradient clipping\r\n\t\t\t\t\t\t# - optimizer step\r\n\t\t\t\t\t\t# - zero grad\r\n\t\t\t\t\t\t# - checking overflow\r\n\t\t\t\t\t\t# - lr_scheduler step (only if engine.lr_scheduler is not None)\r\n\t\t\t\t\t\tself.engine.step()\r\n\t\t\t\t\t\t# and this plugin overrides the above calls with no-ops when Accelerate runs under\r\n\t\t\t\t\t\t# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple\r\n\t\t\t\t\t\t# training loop that works transparently under many training regimes.\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tsuper().__init__(__A\t, device_placement=__A\t, scaler=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= hasattr(self.optimizer\t, \"\"\"overflow\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tif self.__has_overflow__:\r\n\t\t\t\t\t\t\t\t\treturn self.optimizer.overflow\r\n\t\t\t\t\t\treturn False\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tsuper().__init__(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. 
Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=0.0_0_1\t, __A=0\t, **__A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= params\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= lr\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= weight_decay\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= kwargs\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=None\t, __A=0\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= optimizer\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= total_num_steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= warmup_num_steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= kwargs\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":662,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: bytes\t\t) -> bytes:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not isinstance(__lowerCAmelCase\t\t\t\t,\t\t\t\t\t__lowerCAmelCase\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= f\"\"\"a bytes-like object is required, not \\'{data.__class__.__name__}\\'\"\"\"\r\n\t\t\t\t\t\traise TypeError(__lowerCAmelCase\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"\"\"\".join(bin(__lowerCAmelCase\t\t)[2:].zfill(8\t\t) for byte in data\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= len(__lowerCAmelCase\t\t) % 6 != 0\r\n\r\n\t\t\tif padding_needed:\r\n\t\t\t\t\t\t# The padding that will be added later\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= b\"\"\"=\"\"\" * ((6 - len(__lowerCAmelCase\t\t) % 6) // 2)\r\n\r\n\t\t\t\t\t\t# Append binary_stream with arbitrary binary digits (0's by default) to make its\r\n\t\t\t\t\t\t# length a multiple of 6.\r\n\t\t\t\t\t\tbinary_stream += \"0\" * (6 - len(__lowerCAmelCase\t\t) % 6)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= b\"\"\"\"\"\"\r\n\r\n\t\t\t# Encode every 6 binary digits to their corresponding Base64 character\r\n\t\t\treturn (\r\n\t\t\t \"\".join(\r\n\t\t\t B64_CHARSET[int(binary_stream[index : index + 6]\t\t\t\t,\t\t\t\t\t2\t\t)]\r\n\t\t\t for index in range(0\t\t\t\t,\t\t\t\t\tlen(__lowerCAmelCase\t\t)\t\t\t\t,\t\t\t\t\t6\t\t)\t\t).encode()\r\n\t\t\t + padding\r\n\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t) -> bytes:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not isinstance(__lowerCAmelCase\t\t\t\t,\t\t\t\t\t__lowerCAmelCase\t\t) and not isinstance(__lowerCAmelCase\t\t\t\t,\t\t\t\t\t__lowerCAmelCase\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= (\r\n\t\t\t\t\t\t \"\"\"argument should be a bytes-like object or ASCII string, \"\"\"\r\n\t\t\t\t\t\t f\"\"\"not \\'{encoded_data.__class__.__name__}\\'\"\"\"\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\traise TypeError(__lowerCAmelCase\t\t)\r\n\r\n\t\t\t# In case encoded_data is a bytes-like object, make sure it contains only\r\n\t\t\t# ASCII characters so we convert it to a string object\r\n\t\t\tif 
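# --- Worked example added for clarity, not part of the original module: the
# padding arithmetic in the encoder above. b"Ma" is 2 bytes = 16 bits;
# 16 % 6 == 4, so two zero bits are appended to reach 18 bits = 3 base64
# characters, and (6 - 16 % 6) // 2 == 1 "=" is emitted: b"Ma" -> b"TWE=".
# A single byte (8 bits) yields 2 characters plus "==", and 3 bytes (24 bits)
# need no padding at all.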
isinstance(__lowerCAmelCase\t\t\t\t,\t\t\t\t\t__lowerCAmelCase\t\t):\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= encoded_data.decode(\"\"\"utf-8\"\"\"\t\t)\r\n\t\t\t\t\t\texcept UnicodeDecodeError:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"base64 encoded data should only contain ASCII characters\"\"\"\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= encoded_data.count(\"\"\"=\"\"\"\t\t)\r\n\r\n\t\t\t# Check if the encoded string contains non base64 characters\r\n\t\t\tif padding:\r\n\t\t\t\t\t\tassert all(\r\n\t\t\t\t\t\t char in B64_CHARSET for char in encoded_data[:-padding]\t\t), \"Invalid base64 character(s) found.\"\r\n\t\t\telse:\r\n\t\t\t\t\t\tassert all(\r\n\t\t\t\t\t\t char in B64_CHARSET for char in encoded_data\t\t), \"Invalid base64 character(s) found.\"\r\n\r\n\t\t\t# Check the padding\r\n\t\t\tassert len(__lowerCAmelCase\t\t) % 4 == 0 and padding < 3, \"Incorrect padding\"\r\n\r\n\t\t\tif padding:\r\n\t\t\t\t\t\t# Remove padding if there is one\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= encoded_data[:-padding]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"\"\"\".join(\r\n\t\t\t\t\t\t bin(B64_CHARSET.index(__lowerCAmelCase\t\t)\t\t)[2:].zfill(6\t\t) for char in encoded_data\t\t)[: -padding * 2]\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"\"\"\".join(\r\n\t\t\t\t\t\t bin(B64_CHARSET.index(__lowerCAmelCase\t\t)\t\t)[2:].zfill(6\t\t) for char in encoded_data\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= [\r\n\t\t\t int(binary_stream[index : index + 8]\t\t\t\t,\t\t\t\t\t2\t\t)\r\n\t\t\t for index in range(0\t\t\t\t,\t\t\t\t\tlen(__lowerCAmelCase\t\t)\t\t\t\t,\t\t\t\t\t8\t\t)\r\n\t\t\t]\r\n\r\n\t\t\treturn bytes(__lowerCAmelCase\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":352,"string":"352"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Copyright 2023 The HuggingFace Inc. team. All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom ..models.clipseg import CLIPSegForImageSegmentation\r\nfrom ..utils import is_vision_available, requires_backends\r\nfrom .base import PipelineTool\r\n\r\n\r\nif is_vision_available():\r\n\t\t\t\tfrom PIL import Image\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t \"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.\"\r\n\t\t\t \"It takes two arguments named `image` which should be the original image, and `label` which should be a text \"\r\n\t\t\t \"describing the elements what should be identified in the segmentation mask. 
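# --- Illustrative usage, not part of the original module: the encode/decode
# pair defined above mirrors the standard library, shown here as a round trip.
# (The two `_snake_case` functions above implement the same mapping.)
import base64

payload = b"some raw bytes \x00\xff"
encoded = base64.b64encode(payload)      # e.g. b'c29tZSByYXcgYnl0ZXMgAP8='
assert base64.b64decode(encoded) == payload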
The tool returns the mask.\"\r\n\t\t\t)\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t\"CIDAS/clipseg-rd64-refined\"\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\t\"image_segmenter\"\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tCLIPSegForImageSegmentation\r\n\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t[\"image\", \"text\"]\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t[\"image\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, *__A\t, **__A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\trequires_backends(self\t, [\"\"\"vision\"\"\"]\t)\r\n\t\t\t\t\t\tsuper().__init__(*__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn self.pre_processor(text=[label]\t, images=[image]\t, padding=__A\t, return_tensors=\"\"\"pt\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.model(**__A\t).logits\r\n\t\t\t\t\t\treturn logits\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.cpu().detach().numpy()\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 1\r\n\t\t\t\t\t\treturn Image.fromarray((array * 255).astype(np.uinta\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":663,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import List, Optional, Union\r\n\r\nimport numpy as np\r\nimport PIL.Image\r\n\r\nfrom ...image_processing_utils import BaseImageProcessor, BatchFeature\r\nfrom ...image_transforms import rescale, resize, to_channel_dimension_format\r\nfrom ...image_utils import (\r\n ChannelDimension,\r\n PILImageResampling,\r\n get_image_size,\r\n make_list_of_images,\r\n to_numpy_array,\r\n valid_images,\r\n)\r\nfrom ...utils import TensorType, logging\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( a__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\t[\"pixel_values\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A = True\t, __A = 32\t, __A=PILImageResampling.BILINEAR\t, __A = True\t, **__A\t, ) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= do_resize\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= do_rescale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= size_divisor\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= resample\r\n\t\t\t\t\t\tsuper().__init__(**_lowerCamelCase\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A = None\t, **__A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= get_image_size(_lowerCamelCase\t)\r\n\t\t\t\t\t\t# Rounds the height and width down to the closest multiple of size_divisor\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= height // size_divisor * size_divisor\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= width // size_divisor * size_divisor\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= resize(_lowerCamelCase\t, (new_h, new_w)\t, resample=_lowerCamelCase\t, data_format=_lowerCamelCase\t, 
**_lowerCamelCase\t)\r\n\t\t\t\t\t\treturn image\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A = None\t, **__A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\treturn rescale(image=_lowerCamelCase\t, scale=_lowerCamelCase\t, data_format=_lowerCamelCase\t, **_lowerCamelCase\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = None\t, __A = None\t, __A=None\t, __A = None\t, __A = None\t, __A = ChannelDimension.FIRST\t, **__A\t, ) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= do_resize if do_resize is not None else self.do_resize\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= do_rescale if do_rescale is not None else self.do_rescale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= size_divisor if size_divisor is not None else self.size_divisor\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= resample if resample is not None else self.resample\r\n\r\n\t\t\t\t\t\tif do_resize and size_divisor is None:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"size_divisor is required for resizing\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= make_list_of_images(_lowerCamelCase\t)\r\n\r\n\t\t\t\t\t\tif not valid_images(_lowerCamelCase\t):\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"Invalid image(s)\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# All transformations expect numpy arrays.\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [to_numpy_array(_lowerCamelCase\t) for img in images]\r\n\r\n\t\t\t\t\t\tif do_resize:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= [self.resize(_lowerCamelCase\t, size_divisor=_lowerCamelCase\t, resample=_lowerCamelCase\t) for image in images]\r\n\r\n\t\t\t\t\t\tif do_rescale:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= [self.rescale(_lowerCamelCase\t, scale=1 / 255\t) for image in images]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [to_channel_dimension_format(_lowerCamelCase\t, _lowerCamelCase\t) for image in images]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {'''pixel_values''': images}\r\n\t\t\t\t\t\treturn BatchFeature(data=_lowerCamelCase\t, tensor_type=_lowerCamelCase\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":353,"string":"353"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif index == number_of_items:\r\n\t\t\t\t\t\treturn 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= knapsack(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tindex + 1\t\t)\r\n\t\t\tif weights[index] <= max_weight:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= values[index] + knapsack(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tmax_weight - weights[index]\t\t\t\t,\t\t\t\t\tindex + 1\t\t)\r\n\t\t\treturn max(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport 
doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":664,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport math\r\nimport time\r\n\r\nfrom transformers import Trainer, is_torch_tpu_available\r\nfrom transformers.trainer_utils import PredictionOutput, speed_metrics\r\n\r\n\r\nif is_torch_tpu_available(check_device=False):\r\n\t\t\t\timport torch_xla.core.xla_model as xm\r\n\t\t\t\timport torch_xla.debug.metrics as met\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t_SCREAMING_SNAKE_CASE ( lowercase__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, *__A\t, __A=None\t, __A=None\t, **__A\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tsuper().__init__(*_a\t, **_a\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= eval_examples\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= post_process_function\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t, __A=None\t, __A=None\t, __A = \"eval\"\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.eval_dataset if eval_dataset is None else eval_dataset\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.get_eval_dataloader(_a\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.eval_examples if eval_examples is None else eval_examples\r\n\r\n\t\t\t\t\t\t# Temporarily disable metric computation, we will do it in the loop here.\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.compute_metrics\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= time.time()\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= eval_loop(\r\n\t\t\t\t\t\t\t\t\t _a\t, description=\"\"\"Evaluation\"\"\"\t, prediction_loss_only=True if compute_metrics is None else None\t, ignore_keys=_a\t, metric_key_prefix=_a\t, )\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= compute_metrics\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.args.eval_batch_size * self.args.world_size\r\n\t\t\t\t\t\tif f\"\"\"{metric_key_prefix}_jit_compilation_time\"\"\" in output.metrics:\r\n\t\t\t\t\t\t\t\t\tstart_time += output.metrics[f\"\"\"{metric_key_prefix}_jit_compilation_time\"\"\"]\r\n\t\t\t\t\t\toutput.metrics.update(\r\n\t\t\t\t\t\t speed_metrics(\r\n\t\t\t\t\t\t _a\t, _a\t, num_samples=output.num_samples\t, num_steps=math.ceil(output.num_samples / total_batch_size\t)\t, )\t)\r\n\t\t\t\t\t\tif self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:\r\n\t\t\t\t\t\t\t\t\t# Only the main node write the results by default\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.post_process_function(_a\t, _a\t, output.predictions\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.compute_metrics(_a\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Prefix all keys with metric_key_prefix + '_'\r\n\t\t\t\t\t\t\t\t\tfor key in list(metrics.keys()\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tif not key.startswith(f\"\"\"{metric_key_prefix}_\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 
metrics.pop(_a\t)\r\n\t\t\t\t\t\t\t\t\tmetrics.update(output.metrics\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= output.metrics\r\n\r\n\t\t\t\t\t\tif self.args.should_log:\r\n\t\t\t\t\t\t\t\t\t# Only the main node log the results by default\r\n\t\t\t\t\t\t\t\t\tself.log(_a\t)\r\n\r\n\t\t\t\t\t\tif self.args.tpu_metrics_debug or self.args.debug:\r\n\t\t\t\t\t\t\t\t\t# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\r\n\t\t\t\t\t\t\t\t\txm.master_print(met.metrics_report()\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.callback_handler.on_evaluate(self.args\t, self.state\t, self.control\t, _a\t)\r\n\t\t\t\t\t\treturn metrics\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A=None\t, __A = \"test\"\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.get_test_dataloader(_a\t)\r\n\r\n\t\t\t\t\t\t# Temporarily disable metric computation, we will do it in the loop here.\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.compute_metrics\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= time.time()\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= eval_loop(\r\n\t\t\t\t\t\t\t\t\t _a\t, description=\"\"\"Prediction\"\"\"\t, prediction_loss_only=True if compute_metrics is None else None\t, ignore_keys=_a\t, metric_key_prefix=_a\t, )\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= compute_metrics\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.args.eval_batch_size * self.args.world_size\r\n\t\t\t\t\t\tif f\"\"\"{metric_key_prefix}_jit_compilation_time\"\"\" in output.metrics:\r\n\t\t\t\t\t\t\t\t\tstart_time += output.metrics[f\"\"\"{metric_key_prefix}_jit_compilation_time\"\"\"]\r\n\t\t\t\t\t\toutput.metrics.update(\r\n\t\t\t\t\t\t speed_metrics(\r\n\t\t\t\t\t\t _a\t, _a\t, num_samples=output.num_samples\t, num_steps=math.ceil(output.num_samples / total_batch_size\t)\t, )\t)\r\n\r\n\t\t\t\t\t\tif self.post_process_function is None or self.compute_metrics is None:\r\n\t\t\t\t\t\t\t\t\treturn output\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.post_process_function(_a\t, _a\t, output.predictions\t, \"\"\"predict\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.compute_metrics(_a\t)\r\n\r\n\t\t\t\t\t\t# Prefix all keys with metric_key_prefix + '_'\r\n\t\t\t\t\t\tfor key in list(metrics.keys()\t):\r\n\t\t\t\t\t\t\t\t\tif not key.startswith(f\"\"\"{metric_key_prefix}_\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= metrics.pop(_a\t)\r\n\t\t\t\t\t\tmetrics.update(output.metrics\t)\r\n\t\t\t\t\t\treturn PredictionOutput(predictions=predictions.predictions\t, label_ids=predictions.label_ids\t, metrics=_a\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":354,"string":"354"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom .imports import is_tqdm_available\r\n\r\n\r\nif is_tqdm_available():\r\n\t\t\t\tfrom tqdm.auto import tqdm as _tqdm\r\n\r\nfrom ..state import PartialState\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: bool = True\t\t\t\t,\t\t\t\t\t*lowercase__\t\t: 
Optional[int]\t\t\t\t,\t\t\t\t\t**lowercase__\t\t: str\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not is_tqdm_available():\r\n\t\t\t\t\t\traise ImportError(\"\"\"Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= False\r\n\t\t\tif main_process_only:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= PartialState().local_process_index == 0\r\n\t\t\treturn _tqdm(*lowercase__\t\t\t\t,\t\t\t\t\t**lowercase__\t\t\t\t,\t\t\t\t\tdisable=lowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":665,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list[float]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list[float]\t\t) -> float:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= sorted(numsa + numsa\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= divmod(len(__snake_case\t\t)\t\t\t\t,\t\t\t\t\t2\t\t)\r\n\t\t\tif mod == 1:\r\n\t\t\t\t\t\treturn all_numbers[div]\r\n\t\t\telse:\r\n\t\t\t\t\t\treturn (all_numbers[div] + all_numbers[div - 1]) / 2\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [float(x) for x in input('Enter the elements of first array: ').split()]\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [float(x) for x in input('Enter the elements of second array: ').split()]\r\n\t\t\t\tprint(F\"\"\"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}\"\"\")\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":355,"string":"355"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport importlib\r\nimport json\r\nimport os\r\nimport sys\r\nimport tempfile\r\nimport unittest\r\nfrom pathlib import Path\r\n\r\nimport transformers\r\nimport transformers.models.auto\r\nfrom transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig\r\nfrom transformers.models.bert.configuration_bert import BertConfig\r\nfrom transformers.models.roberta.configuration_roberta import RobertaConfig\r\nfrom transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir\r\n\r\n\r\nsys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))\r\n\r\nfrom test_module.custom_configuration import CustomConfig # noqa E402\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= get_tests_dir('fixtures/dummy-config.json')\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself.assertIsNotNone(transformers.models.auto.__spec__\t)\r\n\t\t\t\t\t\tself.assertIsNotNone(importlib.util.find_spec(\"\"\"transformers.models.auto\"\"\"\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 
AutoConfig.from_pretrained(\"\"\"bert-base-uncased\"\"\"\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= AutoConfig.for_model(\"\"\"roberta\"\"\"\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\t# This model name contains bert and roberta, but roberta ends up being picked.\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(__A\t, \"\"\"fake-roberta\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tos.makedirs(__A\t, exist_ok=__A\t)\r\n\t\t\t\t\t\t\t\t\twith open(os.path.join(__A\t, \"\"\"config.json\"\"\"\t)\t, \"\"\"w\"\"\"\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tf.write(json.dumps({}\t)\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(type(__A\t)\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"custom\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# Wrong model type will raise an error\r\n\t\t\t\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"model\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# Trying to register something existing in the Transformers library will raise an error\r\n\t\t\t\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"bert\"\"\"\t, __A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Now that the config is registered, it can be used as any other config with the auto-API\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CustomConfig()\r\n\t\t\t\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\t\t\t\tconfig.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tif \"custom\" in CONFIG_MAPPING._extra_content:\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel CONFIG_MAPPING._extra_content[\"custom\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, \"\"\"bert-base is not a local folder and is not a valid model identifier\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"bert-base\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, r\"\"\"aaaaaa is not a valid git identifier \\(branch name, tag name or commit id\\)\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= AutoConfig.from_pretrained(__A\t, 
revision=\"\"\"aaaaaa\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, \"\"\"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.\"\"\"\t, ):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/no-config-test-repo\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# If remote code is not set, we will time out when asking whether to load the model.\r\n\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t)\r\n\t\t\t\t\t\t# If remote code is disabled, we can't load this config.\r\n\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Test config can be reloaded.\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\tconfig.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= AutoConfig.from_pretrained(__A\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\tself.assertEqual(reloaded_config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\t\"new-model\"\r\n\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"new-model\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# If remote code is not set, the default is to use local\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfigLocal\"\"\"\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# If remote code is disabled, we load the local one.\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfigLocal\"\"\"\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# If remote is enabled, we load from the Hub\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tif \"new-model\" in CONFIG_MAPPING._extra_content:\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel CONFIG_MAPPING._extra_content[\"new-model\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":666,"cells":{"code":{"kind":"string","value":"\r\nfrom collections import deque\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE 
:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= process_name # process name\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= arrival_time # arrival time of the process\r\n\t\t\t\t\t\t# completion time of finished process or last interrupted time\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= arrival_time\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= burst_time # remaining burst time\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0 # total time of the process wait in ready queue\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0 # time from arrival time to completion time\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A\t, __A\t, __A\t, ) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= number_of_queues\r\n\t\t\t\t\t\t# time slice of queues that round robin algorithm applied\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= time_slices\r\n\t\t\t\t\t\t# unfinished process is in this ready_queue\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= queue\r\n\t\t\t\t\t\t# current time\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= current_time\r\n\t\t\t\t\t\t# finished process is in this sequence queue\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= deque()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor i in range(len(self.finish_queue\t)\t):\r\n\t\t\t\t\t\t\t\t\tsequence.append(self.finish_queue[i].process_name\t)\r\n\t\t\t\t\t\treturn sequence\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor i in range(len(_snake_case\t)\t):\r\n\t\t\t\t\t\t\t\t\twaiting_times.append(queue[i].waiting_time\t)\r\n\t\t\t\t\t\treturn waiting_times\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor i in range(len(_snake_case\t)\t):\r\n\t\t\t\t\t\t\t\t\tturnaround_times.append(queue[i].turnaround_time\t)\r\n\t\t\t\t\t\treturn turnaround_times\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor i in range(len(_snake_case\t)\t):\r\n\t\t\t\t\t\t\t\t\tcompletion_times.append(queue[i].stop_time\t)\r\n\t\t\t\t\t\treturn completion_times\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn [q.burst_time for q in queue]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tprocess.waiting_time += self.current_time - process.stop_time\r\n\t\t\t\t\t\treturn process.waiting_time\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= deque() # sequence deque of finished process\r\n\t\t\t\t\t\twhile len(_snake_case\t) != 0:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ready_queue.popleft() # current process\r\n\r\n\t\t\t\t\t\t\t\t\t# if process's arrival time is later than current time, update current time\r\n\t\t\t\t\t\t\t\t\tif 
self.current_time < cp.arrival_time:\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.current_time += cp.arrival_time\r\n\r\n\t\t\t\t\t\t\t\t\t# update waiting time of current process\r\n\t\t\t\t\t\t\t\t\tself.update_waiting_time(_snake_case\t)\r\n\t\t\t\t\t\t\t\t\t# update current time\r\n\t\t\t\t\t\t\t\t\tself.current_time += cp.burst_time\r\n\t\t\t\t\t\t\t\t\t# finish the process and set the process's burst-time 0\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 0\r\n\t\t\t\t\t\t\t\t\t# set the process's turnaround time because it is finished\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.current_time - cp.arrival_time\r\n\t\t\t\t\t\t\t\t\t# set the completion time\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.current_time\r\n\t\t\t\t\t\t\t\t\t# add the process to queue that has finished queue\r\n\t\t\t\t\t\t\t\t\tfinished.append(_snake_case\t)\r\n\r\n\t\t\t\t\t\tself.finish_queue.extend(_snake_case\t) # add finished process to finish queue\r\n\t\t\t\t\t\t# FCFS will finish all remaining processes\r\n\t\t\t\t\t\treturn finished\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= deque() # sequence deque of terminated process\r\n\t\t\t\t\t\t# just for 1 cycle and unfinished processes will go back to queue\r\n\t\t\t\t\t\tfor _ in range(len(_snake_case\t)\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= ready_queue.popleft() # current process\r\n\r\n\t\t\t\t\t\t\t\t\t# if process's arrival time is later than current time, update current time\r\n\t\t\t\t\t\t\t\t\tif self.current_time < cp.arrival_time:\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.current_time += cp.arrival_time\r\n\r\n\t\t\t\t\t\t\t\t\t# update waiting time of unfinished processes\r\n\t\t\t\t\t\t\t\t\tself.update_waiting_time(_snake_case\t)\r\n\t\t\t\t\t\t\t\t\t# if the burst time of process is bigger than time-slice\r\n\t\t\t\t\t\t\t\t\tif cp.burst_time > time_slice:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# use CPU for only time-slice\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.current_time += time_slice\r\n\t\t\t\t\t\t\t\t\t\t\t\t# update remaining burst time\r\n\t\t\t\t\t\t\t\t\t\t\t\tcp.burst_time -= time_slice\r\n\t\t\t\t\t\t\t\t\t\t\t\t# update end point time\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.current_time\r\n\t\t\t\t\t\t\t\t\t\t\t\t# locate the process behind the queue because it is not finished\r\n\t\t\t\t\t\t\t\t\t\t\t\tready_queue.append(_snake_case\t)\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# use CPU for remaining burst time\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.current_time += cp.burst_time\r\n\t\t\t\t\t\t\t\t\t\t\t\t# set burst time 0 because the process is finished\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 0\r\n\t\t\t\t\t\t\t\t\t\t\t\t# set the finish time\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.current_time\r\n\t\t\t\t\t\t\t\t\t\t\t\t# update the process' turnaround time because it is finished\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.current_time - cp.arrival_time\r\n\t\t\t\t\t\t\t\t\t\t\t\t# add the process to queue that has finished queue\r\n\t\t\t\t\t\t\t\t\t\t\t\tfinished.append(_snake_case\t)\r\n\r\n\t\t\t\t\t\tself.finish_queue.extend(_snake_case\t) # add finished process to finish queue\r\n\t\t\t\t\t\t# return finished processes queue and remaining processes queue\r\n\t\t\t\t\t\treturn finished, ready_queue\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) 
->\t\t\t\t\tList[Any]:\r\n\r\n\r\n\t\t\t\t\t\tfor i in range(self.number_of_queues - 1\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.round_robin(\r\n\t\t\t\t\t\t\t\t\t self.ready_queue\t, self.time_slices[i]\t)\r\n\t\t\t\t\t\t# the last queue has first_come_first_served algorithm\r\n\t\t\t\t\t\tself.first_come_first_served(self.ready_queue\t)\r\n\r\n\t\t\t\t\t\treturn self.finish_queue\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Process('P1', 0, 53)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Process('P2', 0, 17)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Process('P3', 0, 68)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Process('P4', 0, 24)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 3\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [17, 25]\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= deque([Pa, Pa, Pa, Pa])\r\n\r\n\t\t\t\tif len(time_slices) != number_of_queues - 1:\r\n\t\t\t\t\t\t\t\traise SystemExit(0)\r\n\r\n\t\t\t\tdoctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Process('P1', 0, 53)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Process('P2', 0, 17)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Process('P3', 0, 68)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Process('P4', 0, 24)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 3\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [17, 25]\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= deque([Pa, Pa, Pa, Pa])\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= MLFQ(number_of_queues, time_slices, queue, 0)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= mlfq.multi_level_feedback_queue()\r\n\r\n\t\t\t\t# print total waiting times of processes(P1, P2, P3, P4)\r\n\t\t\t\tprint(\r\n\t\t\t\t F\"\"\"waiting time:\\\n \\t\\t\\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}\"\"\"\r\n\t\t\t\t)\r\n\t\t\t\t# print completion times of processes(P1, P2, P3, P4)\r\n\t\t\t\tprint(\r\n\t\t\t\t F\"\"\"completion time:\\\n \\t\\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}\"\"\"\r\n\t\t\t\t)\r\n\t\t\t\t# print total turnaround times of processes(P1, P2, P3, P4)\r\n\t\t\t\tprint(\r\n\t\t\t\t F\"\"\"turnaround time:\\\n \\t\\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}\"\"\"\r\n\t\t\t\t)\r\n\t\t\t\t# print sequence of finished processes\r\n\t\t\t\tprint(\r\n\t\t\t\t F\"\"\"sequence of finished processes:\\\n {mlfq.calculate_sequence_of_finish_queue()}\"\"\"\r\n\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":356,"string":"356"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nimport unittest\r\n\r\nfrom transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (\r\n VOCAB_FILES_NAMES,\r\n GPTSanJapaneseTokenizer,\r\n)\r\nfrom transformers.testing_utils import require_tokenizers, slow\r\n\r\nfrom ...test_tokenization_common import TokenizerTesterMixin\r\n\r\n\r\n\r\n\r\n@require_tokenizers\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tGPTSanJapaneseTokenizer\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t{\"do_clean_text\": False, \"add_prefix_space\": False}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tsuper().setUp()\r\n\r\n\t\t\t\t\t\t# fmt: 
off\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [\"\"\"こん\"\"\", \"\"\"こんに\"\"\", \"\"\"にちは\"\"\", \"\"\"ばんは\"\"\", \"\"\"世界,㔺界\"\"\", \"\"\"、\"\"\", \"\"\"。\"\"\", \"\"\"<BR>\"\"\", \"\"\"<SP>\"\"\", \"\"\"<TAB>\"\"\", \"\"\"<URL>\"\"\", \"\"\"<EMAIL>\"\"\", \"\"\"<TEL>\"\"\", \"\"\"<DATE>\"\"\", \"\"\"<PRICE>\"\"\", \"\"\"<BLOCK>\"\"\", \"\"\"<KIGOU>\"\"\", \"\"\"<U2000U2BFF>\"\"\", \"\"\"<|emoji1|>\"\"\", \"\"\"<unk>\"\"\", \"\"\"<|bagoftoken|>\"\"\", \"\"\"<|endoftext|>\"\"\"]\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\"\"\"emoji\"\"\": {\"\"\"\\ud83d\\ude00\"\"\": \"\"\"<|emoji1|>\"\"\"}, \"\"\"emoji_inv\"\"\": {\"\"\"<|emoji1|>\"\"\": \"\"\"\\ud83d\\ude00\"\"\"}} # 😀\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {\"\"\"unk_token\"\"\": \"\"\"<unk>\"\"\"}\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"emoji_file\"\"\"]\t)\r\n\t\t\t\t\t\twith open(self.vocab_file\t, \"\"\"w\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as vocab_writer:\r\n\t\t\t\t\t\t\t\t\tvocab_writer.write(\"\"\"\"\"\".join([x + \"\"\"\\n\"\"\" for x in vocab_tokens]\t)\t)\r\n\t\t\t\t\t\twith open(self.emoji_file\t, \"\"\"w\"\"\"\t) as emoji_writer:\r\n\t\t\t\t\t\t\t\t\temoji_writer.write(json.dumps(__A\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tkwargs.update(self.special_tokens_map\t)\r\n\t\t\t\t\t\treturn GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。 \\nこんばんは、㔺界。😀\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんにちは、世界。 \\nこんばんは、世界。😀\"\"\"\r\n\t\t\t\t\t\treturn input_text, output_text\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_input_output_texts(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(__A\t, add_special_tokens=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.decode(__A\t, clean_up_tokenization_spaces=__A\t)\r\n\t\t\t\t\t\treturn text, ids\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tpass # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんにちは、世界。　こんばんは、㔺界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\"\"\"こん\"\"\", \"\"\"にちは\"\"\", \"\"\"、\"\"\", \"\"\"世界\"\"\", \"\"\"。\"\"\", \"\"\"<SP>\"\"\", \"\"\"こん\"\"\", \"\"\"ばんは\"\"\", \"\"\"、\"\"\", \"\"\"㔺界\"\"\", \"\"\"。\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.tokenize(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids without special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [0, 
2, 5, 4, 6, 8, 0, 3, 5, 4, 6]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids with special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokens + [tokenizer.unk_token]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"こんにちは、、、、世界。こんばんは、、、、世界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.encode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"こんばんは、㔺界。😀\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。こんばんは、世界。😀\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer.encode(prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(__A\t, prefix_text=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんばんは、㔺界。😀\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [1] + [0] * (len_prefix + len_text + 1)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [1] * (len_prefix + len_text + 1) + [0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [1] + [1] * (len_prefix) + [0] * (len_text + 1)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(prefix_text + 
input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(__A\t, prefix_text=__A\t).token_type_ids\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"あンいワ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=\"\"\"あンいワ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"いワ\"\"\"\t, prefix_text=\"\"\"あン\"\"\"\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[-1]\t) # SEG token\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[3]\t) # SEG token\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[\"\"\"武田信玄\"\"\", \"\"\"は、\"\"\"], [\"\"\"織田信長\"\"\", \"\"\"の配下の、\"\"\"]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer(__A\t, padding=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer.batch_encode_plus(__A\t, padding=__A\t)\r\n\r\n\t\t\t\t\t\t# fmt: off\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tself.assertListEqual(x_token.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.attention_mask\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.attention_mask\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# Intentionally convert some words to accommodate character fluctuations unique to Japanese\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# tokenizer has no padding token\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":667,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport math\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef 
\t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t) -> List[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif 1 < number < 4:\r\n\t\t\t\t\t\t# 2 and 3 are primes\r\n\t\t\t\t\t\treturn True\r\n\t\t\telif number < 2 or number % 2 == 0 or number % 3 == 0:\r\n\t\t\t\t\t\t# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t# All primes number are in format of 6k +/- 1\r\n\t\t\tfor i in range(5\t\t\t\t,\t\t\t\t\tint(math.sqrt(lowerCamelCase_\t\t) + 1\t\t)\t\t\t\t,\t\t\t\t\t6\t\t):\r\n\t\t\t\t\t\tif number % i == 0 or number % (i + 2) == 0:\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\treturn True\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 1_0_0_0_1\t\t) -> Optional[int]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\ttry:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= int(lowerCamelCase_\t\t)\r\n\t\t\texcept (TypeError, ValueError):\r\n\t\t\t\t\t\traise TypeError(\"\"\"Parameter nth must be int or castable to int.\"\"\"\t\t) from None\r\n\t\t\tif nth <= 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Parameter nth must be greater than or equal to one.\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :list[int] \t\t\t\t\t= []\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 2\r\n\t\t\twhile len(lowerCamelCase_\t\t) < nth:\r\n\t\t\t\t\t\tif is_prime(lowerCamelCase_\t\t):\r\n\t\t\t\t\t\t\t\t\tprimes.append(lowerCamelCase_\t\t)\r\n\t\t\t\t\t\t\t\t\tnum += 1\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tnum += 1\r\n\t\t\treturn primes[len(lowerCamelCase_\t\t) - 1]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":357,"string":"357"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Fitting Polynomial Regression to the dataset\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\n\r\n# Importing the dataset\r\n__UpperCAmelCase\t\t\t\t\t\t\t= pd.read_csv(\r\n 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'\r\n 'position_salaries.csv'\r\n)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= dataset.iloc[:, 1:2].values\r\n__UpperCAmelCase\t\t\t\t\t\t\t= dataset.iloc[:, 2].values\r\n\r\n\r\n__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= train_test_split(X, y, test_size=0.2, random_state=0)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= PolynomialFeatures(degree=4)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= poly_reg.fit_transform(X)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= LinearRegression()\r\npol_reg.fit(X_poly, y)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tplt.scatter(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tcolor=\"\"\"red\"\"\"\t\t)\r\n\t\t\tplt.plot(lowercase__\t\t\t\t,\t\t\t\t\tpol_reg.predict(poly_reg.fit_transform(lowercase__\t\t)\t\t)\t\t\t\t,\t\t\t\t\tcolor=\"\"\"blue\"\"\"\t\t)\r\n\t\t\tplt.title(\"\"\"Truth or Bluff (Linear Regression)\"\"\"\t\t)\r\n\t\t\tplt.xlabel(\"\"\"Position level\"\"\"\t\t)\r\n\t\t\tplt.ylabel(\"\"\"Salary\"\"\"\t\t)\r\n\t\t\tplt.show()\r\n\r\n\r\nif __name__ == 
\"__main__\":\r\n\t\t\t\tviz_polymonial()\r\n\r\n\t\t\t\t# Predicting a new result with Polymonial Regression\r\n\t\t\t\tpol_reg.predict(poly_reg.fit_transform([[5.5]]))\r\n\t\t\t\t# output should be 132148.43750003\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":668,"cells":{"code":{"kind":"string","value":"\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import (\r\n OptionalDependencyNotAvailable,\r\n _LazyModule,\r\n is_flax_available,\r\n is_sentencepiece_available,\r\n is_tf_available,\r\n is_tokenizers_available,\r\n is_torch_available,\r\n)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n \"\"\"configuration_albert\"\"\": [\"\"\"ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP\"\"\", \"\"\"AlbertConfig\"\"\", \"\"\"AlbertOnnxConfig\"\"\"],\r\n}\r\n\r\ntry:\r\n\t\t\t\tif not is_sentencepiece_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\"\"\"AlbertTokenizer\"\"\"]\r\n\r\ntry:\r\n\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\"\"\"AlbertTokenizerFast\"\"\"]\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t \"\"\"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST\"\"\",\r\n\t\t\t\t \"\"\"AlbertForMaskedLM\"\"\",\r\n\t\t\t\t \"\"\"AlbertForMultipleChoice\"\"\",\r\n\t\t\t\t \"\"\"AlbertForPreTraining\"\"\",\r\n\t\t\t\t \"\"\"AlbertForQuestionAnswering\"\"\",\r\n\t\t\t\t \"\"\"AlbertForSequenceClassification\"\"\",\r\n\t\t\t\t \"\"\"AlbertForTokenClassification\"\"\",\r\n\t\t\t\t \"\"\"AlbertModel\"\"\",\r\n\t\t\t\t \"\"\"AlbertPreTrainedModel\"\"\",\r\n\t\t\t\t \"\"\"load_tf_weights_in_albert\"\"\",\r\n\t\t\t\t]\r\n\r\ntry:\r\n\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t \"\"\"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST\"\"\",\r\n\t\t\t\t \"\"\"TFAlbertForMaskedLM\"\"\",\r\n\t\t\t\t \"\"\"TFAlbertForMultipleChoice\"\"\",\r\n\t\t\t\t \"\"\"TFAlbertForPreTraining\"\"\",\r\n\t\t\t\t \"\"\"TFAlbertForQuestionAnswering\"\"\",\r\n\t\t\t\t \"\"\"TFAlbertForSequenceClassification\"\"\",\r\n\t\t\t\t \"\"\"TFAlbertForTokenClassification\"\"\",\r\n\t\t\t\t \"\"\"TFAlbertMainLayer\"\"\",\r\n\t\t\t\t \"\"\"TFAlbertModel\"\"\",\r\n\t\t\t\t \"\"\"TFAlbertPreTrainedModel\"\"\",\r\n\t\t\t\t]\r\n\r\ntry:\r\n\t\t\t\tif not is_flax_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t \"\"\"FlaxAlbertForMaskedLM\"\"\",\r\n\t\t\t\t \"\"\"FlaxAlbertForMultipleChoice\"\"\",\r\n\t\t\t\t \"\"\"FlaxAlbertForPreTraining\"\"\",\r\n\t\t\t\t \"\"\"FlaxAlbertForQuestionAnswering\"\"\",\r\n\t\t\t\t \"\"\"FlaxAlbertForSequenceClassification\"\"\",\r\n\t\t\t\t \"\"\"FlaxAlbertForTokenClassification\"\"\",\r\n\t\t\t\t \"\"\"FlaxAlbertModel\"\"\",\r\n\t\t\t\t \"\"\"FlaxAlbertPreTrainedModel\"\"\",\r\n\t\t\t\t]\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom 
.configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_sentencepiece_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .tokenization_albert import AlbertTokenizer\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tokenizers_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .tokenization_albert_fast import AlbertTokenizerFast\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_albert import (\r\n\t\t\t\t\t\t\t\t ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t AlbertForMaskedLM,\r\n\t\t\t\t\t\t\t\t AlbertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t AlbertForPreTraining,\r\n\t\t\t\t\t\t\t\t AlbertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t AlbertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t AlbertForTokenClassification,\r\n\t\t\t\t\t\t\t\t AlbertModel,\r\n\t\t\t\t\t\t\t\t AlbertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t load_tf_weights_in_albert,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_tf_albert import (\r\n\t\t\t\t\t\t\t\t TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t TFAlbertForMaskedLM,\r\n\t\t\t\t\t\t\t\t TFAlbertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t TFAlbertForPreTraining,\r\n\t\t\t\t\t\t\t\t TFAlbertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t TFAlbertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t TFAlbertForTokenClassification,\r\n\t\t\t\t\t\t\t\t TFAlbertMainLayer,\r\n\t\t\t\t\t\t\t\t TFAlbertModel,\r\n\t\t\t\t\t\t\t\t TFAlbertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_flax_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_flax_albert import (\r\n\t\t\t\t\t\t\t\t FlaxAlbertForMaskedLM,\r\n\t\t\t\t\t\t\t\t FlaxAlbertForMultipleChoice,\r\n\t\t\t\t\t\t\t\t FlaxAlbertForPreTraining,\r\n\t\t\t\t\t\t\t\t FlaxAlbertForQuestionAnswering,\r\n\t\t\t\t\t\t\t\t FlaxAlbertForSequenceClassification,\r\n\t\t\t\t\t\t\t\t FlaxAlbertForTokenClassification,\r\n\t\t\t\t\t\t\t\t FlaxAlbertModel,\r\n\t\t\t\t\t\t\t\t FlaxAlbertPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":358,"string":"358"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1.6021e-19 # units = C\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\t) -> 
tuple[str, float]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif (conductivity, electron_conc, mobility).count(0\t\t) != 1:\r\n\t\t\t\t\t\traise ValueError(\"\"\"You cannot supply more or less than 2 values\"\"\"\t\t)\r\n\t\t\telif conductivity < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Conductivity cannot be negative\"\"\"\t\t)\r\n\t\t\telif electron_conc < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Electron concentration cannot be negative\"\"\"\t\t)\r\n\t\t\telif mobility < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"mobility cannot be negative\"\"\"\t\t)\r\n\t\t\telif conductivity == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"conductivity\",\r\n\t\t\t\t\t\t mobility * electron_conc * ELECTRON_CHARGE,\r\n\t\t\t\t\t\t)\r\n\t\t\telif electron_conc == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"electron_conc\",\r\n\t\t\t\t\t\t conductivity / (mobility * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"mobility\",\r\n\t\t\t\t\t\t conductivity / (electron_conc * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":669,"cells":{"code":{"kind":"string","value":"\r\nfrom pathlib import PurePosixPath\r\nfrom typing import Optional\r\n\r\nimport fsspec\r\nfrom fsspec import AbstractFileSystem\r\nfrom huggingface_hub.hf_api import DatasetInfo\r\n\r\nfrom ..utils.file_utils import get_authentication_headers_for_url\r\nfrom ..utils.hub import hf_hub_url\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A_\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t\"\"\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t\"hf-legacy\" # \"hf://\"\" is reserved for hffs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A = None\t, __A = None\t, **__A\t, ) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tsuper().__init__(self\t, **snake_case__\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= repo_info\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= token\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= None\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\r\n\r\n\t\t\t\t\t\tif self.dir_cache is None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {}\r\n\t\t\t\t\t\t\t\t\tfor hf_file in self.repo_info.siblings:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# TODO(QL): add sizes\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= {\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"name\": hf_file.rfilename,\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"size\": None,\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"type\": \"file\",\r\n\t\t\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.dir_cache.update(\r\n\t\t\t\t\t\t\t\t\t\t\t\t {\r\n\t\t\t\t\t\t\t\t\t\t\t\t str(snake_case__\t): {\"\"\"name\"\"\": str(snake_case__\t), \"\"\"size\"\"\": None, \"\"\"type\"\"\": \"\"\"directory\"\"\"}\r\n\t\t\t\t\t\t\t\t\t\t\t\t for d in list(PurePosixPath(hf_file.rfilename\t).parents\t)[:-1]\r\n\t\t\t\t\t\t\t\t\t\t\t\t }\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = \"rb\"\t, **__A\t, ) ->\t\t\t\t\tUnion[str, Any]:\r\n\r\n\r\n\t\t\t\t\t\tif not isinstance(self.repo_info\t, snake_case__\t):\r\n\t\t\t\t\t\t\t\t\traise NotImplementedError(f\"\"\"Open is only implemented for dataset repositories, but got 
{self.repo_info}\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= hf_hub_url(self.repo_info.id\t, snake_case__\t, revision=self.repo_info.sha\t)\r\n\t\t\t\t\t\treturn fsspec.open(\r\n\t\t\t\t\t\t snake_case__\t, mode=snake_case__\t, headers=get_authentication_headers_for_url(snake_case__\t, use_auth_token=self.token\t)\t, client_kwargs={\"\"\"trust_env\"\"\": True}\t, ).open()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself._get_dirs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self._strip_protocol(snake_case__\t)\r\n\t\t\t\t\t\tif path in self.dir_cache:\r\n\t\t\t\t\t\t\t\t\treturn self.dir_cache[path]\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\traise FileNotFoundError(snake_case__\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=False\t, **__A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tself._get_dirs()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= PurePosixPath(path.strip(\"\"\"/\"\"\"\t)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= {}\r\n\t\t\t\t\t\tfor p, f in self.dir_cache.items():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= PurePosixPath(p.strip(\"\"\"/\"\"\"\t)\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= p.parent\r\n\t\t\t\t\t\t\t\t\tif root == path:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= f\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= list(paths.values()\t)\r\n\t\t\t\t\t\tif detail:\r\n\t\t\t\t\t\t\t\t\treturn out\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\treturn sorted(f[\"\"\"name\"\"\"] for f in out\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":359,"string":"359"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport warnings\r\n\r\nfrom ...utils import logging\r\nfrom .image_processing_clip import CLIPImageProcessor\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, *__A\t, **__A\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\twarnings.warn(\r\n\t\t\t\t\t\t \"\"\"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. 
Please\"\"\"\r\n\t\t\t\t\t\t \"\"\" use CLIPImageProcessor instead.\"\"\"\t, __A\t, )\r\n\t\t\t\t\t\tsuper().__init__(*__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":670,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nfrom typing import Optional, Tuple\r\n\r\nfrom ...tokenization_utils import PreTrainedTokenizer\r\nfrom ...utils import logging\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'vocab_file': 'vocab.json'}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'vocab_file': {\r\n 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',\r\n }\r\n}\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {'mgp-str': 27}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tVOCAB_FILES_NAMES\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tPRETRAINED_VOCAB_FILES_MAP\r\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\tPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=\"[GO]\"\t, __A=\"[GO]\"\t, __A=\"[s]\"\t, __A=\"[GO]\"\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tsuper().__init__(\r\n\t\t\t\t\t\t unk_token=_a\t, bos_token=_a\t, eos_token=_a\t, pad_token=_a\t, **_a\t, )\r\n\r\n\t\t\t\t\t\twith open(_a\t, encoding=\"\"\"utf-8\"\"\"\t) as vocab_handle:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= json.load(_a\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {v: k for k, v in self.vocab.items()}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\treturn len(self.vocab\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn dict(self.vocab\t, **self.added_tokens_encoder\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor s in text:\r\n\t\t\t\t\t\t\t\t\tchar_tokens.extend(_a\t)\r\n\t\t\t\t\t\treturn char_tokens\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\treturn self.vocab.get(_a\t, self.vocab.get(self.unk_token\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn self.decoder.get(_a\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A = None\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tif not os.path.isdir(_a\t):\r\n\t\t\t\t\t\t\t\t\tlogger.error(\"\"\"Vocabulary path ({}) should be a directory\"\"\".format(_a\t)\t)\r\n\t\t\t\t\t\t\t\t\treturn\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= os.path.join(\r\n\t\t\t\t\t\t _a\t, (filename_prefix + \"\"\"-\"\"\" if filename_prefix else \"\"\"\"\"\") + VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\r\n\t\t\t\t\t\twith open(_a\t, \"\"\"w\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as f:\r\n\t\t\t\t\t\t\t\t\tf.write(json.dumps(self.vocab\t, indent=2\t, sort_keys=_a\t, ensure_ascii=_a\t) + \"\"\"\\n\"\"\"\t)\r\n\r\n\t\t\t\t\t\treturn 
(vocab_file,)\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":360,"string":"360"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom itertools import zip_longest\r\n\r\nimport requests\r\nfrom bsa import BeautifulSoup\r\nfrom pandas import DataFrame\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = \"laptop\"\t\t) -> DataFrame:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= f\"\"\"https://www.amazon.in/laptop/s?k={product}\"\"\"\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t \"\"\"User-Agent\"\"\": \"\"\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36\"\"\",\r\n\t\t\t \"\"\"Accept-Language\"\"\": \"\"\"en-US, en;q=0.5\"\"\",\r\n\t\t\t}\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= BeautifulSoup(requests.get(lowercase__\t\t\t\t,\t\t\t\t\theaders=lowercase__\t\t).text\t\t)\r\n\t\t\t# Initialize a Pandas dataframe with the column titles\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= DataFrame(\r\n\t\t\t columns=[\r\n\t\t\t \"\"\"Product Title\"\"\",\r\n\t\t\t \"\"\"Product Link\"\"\",\r\n\t\t\t \"\"\"Current Price of the product\"\"\",\r\n\t\t\t \"\"\"Product Rating\"\"\",\r\n\t\t\t \"\"\"MRP of the product\"\"\",\r\n\t\t\t \"\"\"Discount\"\"\",\r\n\t\t\t ]\t\t)\r\n\t\t\t# Loop through each entry and store them in the dataframe\r\n\t\t\tfor item, _ in zip_longest(\r\n\t\t\t soup.find_all(\r\n\t\t\t \"\"\"div\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"s-result-item\"\"\", \"\"\"data-component-type\"\"\": \"\"\"s-search-result\"\"\"}\t\t\t\t,\t\t\t\t\t)\t\t\t\t,\t\t\t\t\tsoup.find_all(\"\"\"div\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-row a-size-base a-color-base\"\"\"}\t\t)\t\t\t\t,\t\t\t\t\t):\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= item.ha.text\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"https://www.amazon.in/\"\"\" + item.ha.a[\"\"\"href\"\"\"]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-offscreen\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-icon-alt\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"Not available\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"โ‚น\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t + item.find(\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-price a-text-price\"\"\"}\t\t).text.split(\"\"\"โ‚น\"\"\"\t\t)[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= float(\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t float(product_mrp.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t - float(product_price.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t 
)\r\n\t\t\t\t\t\t\t\t\t\t\t\t / float(product_mrp.strip(\"\"\"โ‚น\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t )\r\n\t\t\t\t\t\t\t\t\t\t\t\t * 1_0_0\t\t)\r\n\t\t\t\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= float(\"\"\"nan\"\"\"\t\t)\r\n\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\r\n\t\t\t\t\t\t product_title,\r\n\t\t\t\t\t\t product_link,\r\n\t\t\t\t\t\t product_price,\r\n\t\t\t\t\t\t product_rating,\r\n\t\t\t\t\t\t product_mrp,\r\n\t\t\t\t\t\t discount,\r\n\t\t\t\t\t\t]\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tdata_frame.index += 1\r\n\t\t\treturn data_frame\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 'headphones'\r\n\t\t\t\tget_amazon_product_data(product).to_csv(F\"\"\"Amazon Product Data for {product}.csv\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":671,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom collections import namedtuple\r\nfrom dataclasses import dataclass\r\n\r\n\r\n\r\n\r\n@dataclass\r\nclass _SCREAMING_SNAKE_CASE :\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\t42\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tNone\r\n\t\t\tUpperCAmelCase_ :Any \t\t\t=\t\t\t\t\t\tNone\r\n\r\n\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= namedtuple('CoinsDistribResult', 'moves excess')\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Optional[Any]\t\t) -> Union[str, Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif root is None:\r\n\t\t\t\t\t\treturn 0\r\n\r\n\t\t\t# Validation\r\n\t\t\tdef count_nodes(lowercase__\t\t: Optional[Any]\t\t) -> int:\r\n\t\t\t\t\t\tif node is None:\r\n\t\t\t\t\t\t\t\t\treturn 0\r\n\r\n\t\t\t\t\t\treturn count_nodes(node.left\t\t) + count_nodes(node.right\t\t) + 1\r\n\r\n\t\t\tdef count_coins(lowercase__\t\t: List[Any]\t\t) -> int:\r\n\t\t\t\t\t\tif node is None:\r\n\t\t\t\t\t\t\t\t\treturn 0\r\n\r\n\t\t\t\t\t\treturn count_coins(node.left\t\t) + count_coins(node.right\t\t) + node.data\r\n\r\n\t\t\tif count_nodes(lowercase__\t\t) != count_coins(lowercase__\t\t):\r\n\t\t\t\t\t\traise ValueError(\"\"\"The nodes number should be same as the number of coins\"\"\"\t\t)\r\n\r\n\t\t\t# Main calculation\r\n\t\t\tdef get_distrib(lowercase__\t\t: Any\t\t) -> CoinsDistribResult:\r\n\r\n\t\t\t\t\t\tif node is None:\r\n\t\t\t\t\t\t\t\t\treturn CoinsDistribResult(0\t\t\t\t,\t\t\t\t\t1\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= get_distrib(node.left\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= get_distrib(node.right\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 1 - left_distrib_excess\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 1 - right_distrib_excess\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= (\r\n\t\t\t\t\t\t left_distrib_moves\r\n\t\t\t\t\t\t + right_distrib_moves\r\n\t\t\t\t\t\t + abs(lowercase__\t\t)\r\n\t\t\t\t\t\t + abs(lowercase__\t\t)\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= node.data - coins_to_left - coins_to_right\r\n\r\n\t\t\t\t\t\treturn 
CoinsDistribResult(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\treturn get_distrib(lowercase__\t\t)[0]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":361,"string":"361"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport shutil\r\nimport tempfile\r\nimport unittest\r\n\r\nfrom transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast\r\nfrom transformers.testing_utils import require_sentencepiece, require_torchaudio\r\n\r\nfrom .test_feature_extraction_clap import floats_list\r\n\r\n\r\n\r\n\r\n@require_torchaudio\r\n@require_sentencepiece\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"laion/clap-htsat-unfused\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tempfile.mkdtemp()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\treturn RobertaTokenizer.from_pretrained(self.checkpoint\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn ClapFeatureExtractor.from_pretrained(self.checkpoint\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tshutil.rmtree(self.tmpdirname\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_tokenizer()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tprocessor.save_pretrained(self.tmpdirname\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ClapProcessor.from_pretrained(self.tmpdirname\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.tokenizer.get_vocab()\t, tokenizer.get_vocab()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.tokenizer\t, __A\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.feature_extractor.to_json_string()\t, feature_extractor.to_json_string()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.feature_extractor\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= ClapProcessor(tokenizer=self.get_tokenizer()\t, feature_extractor=self.get_feature_extractor()\t)\r\n\t\t\t\t\t\tprocessor.save_pretrained(self.tmpdirname\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_tokenizer(bos_token=\"\"\"(BOS)\"\"\"\t, eos_token=\"\"\"(EOS)\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.get_feature_extractor(do_normalize=__A\t, padding_value=1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ClapProcessor.from_pretrained(\r\n\t\t\t\t\t\t self.tmpdirname\t, bos_token=\"\"\"(BOS)\"\"\"\t, eos_token=\"\"\"(EOS)\"\"\"\t, do_normalize=__A\t, padding_value=1.0\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.tokenizer.get_vocab()\t, tokenizer_add_kwargs.get_vocab()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.tokenizer\t, 
__A\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.feature_extractor.to_json_string()\t, feature_extractor_add_kwargs.to_json_string()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.feature_extractor\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= floats_list((3, 1000)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= feature_extractor(__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= processor(audios=__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\r\n\t\t\t\t\t\tfor key in input_feat_extract.keys():\r\n\t\t\t\t\t\t\t\t\tself.assertAlmostEqual(input_feat_extract[key].sum()\t, input_processor[key].sum()\t, delta=1E-2\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"This is a test string\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= processor(text=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(__A\t)\r\n\r\n\t\t\t\t\t\tfor key in encoded_tok.keys():\r\n\t\t\t\t\t\t\t\t\tself.assertListEqual(encoded_tok[key]\t, encoded_processor[key]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= processor.batch_decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer.batch_decode(__A\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(\r\n\t\t\t\t\t\t processor.model_input_names[2:]\t, feature_extractor.model_input_names\t, msg=\"\"\"`processor` and `feature_extractor` model input names do not match\"\"\"\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":672,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :list[list[int]] \t\t\t\t\t= [[0 for _ in 
range(_lowerCamelCase\t\t)] for _ in range(m + 1\t\t)]\r\n\t\t\tfor i in range(m + 1\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 1\r\n\r\n\t\t\tfor n in range(m + 1\t\t):\r\n\t\t\t\t\t\tfor k in range(1\t\t\t\t,\t\t\t\t\t_lowerCamelCase\t\t):\r\n\t\t\t\t\t\t\t\t\tmemo[n][k] += memo[n][k - 1]\r\n\t\t\t\t\t\t\t\t\tif n - k > 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\tmemo[n][k] += memo[n - k - 1][k]\r\n\r\n\t\t\treturn memo[m][m - 1]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\tif len(sys.argv) == 1:\r\n\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= int(input('Enter a number: ').strip())\r\n\t\t\t\t\t\t\t\t\t\t\t\tprint(partition(n))\r\n\t\t\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tprint('Please enter a number.')\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= int(sys.argv[1])\r\n\t\t\t\t\t\t\t\t\t\t\t\tprint(partition(n))\r\n\t\t\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tprint('Please pass a number.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":362,"string":"362"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nfrom math import logaa\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = \"base_exp.txt\"\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :float \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0\r\n\t\t\tfor i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__\t\t)\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= list(map(lowercase__\t\t\t\t,\t\t\t\t\tline.split(\"\"\",\"\"\"\t\t)\t\t)\t\t)\r\n\t\t\t\t\t\tif x * logaa(lowercase__\t\t) > largest:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= x * logaa(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= i + 1\r\n\t\t\treturn result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(solution())\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":673,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\n\r\nfrom torch import nn\r\n\r\n# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here\r\n# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively\r\nfrom transformers_old.modeling_prophetnet import (\r\n ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,\r\n)\r\nfrom transformers_old.modeling_xlm_prophetnet import (\r\n XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,\r\n)\r\n\r\nfrom transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\nlogging.set_verbosity_info()\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Dict\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[Any]\t\t) -> int:\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n if \"xprophetnet\" in prophetnet_checkpoint_path:\r\n lowerCAmelCase_ :Tuple \t\t\t\t\t= XLMProphetNetForConditionalGenerationOld.from_pretrained(a_\t\t)\r\n 
lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= XLMProphetNetForConditionalGeneration.from_pretrained(\r\n a_\t\t\t\t,\t\t\t\t\toutput_loading_info=a_\t\t)\r\n else:\r\n lowerCAmelCase_ :Optional[int] \t\t\t\t\t= ProphetNetForConditionalGenerationOld.from_pretrained(a_\t\t)\r\n lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= ProphetNetForConditionalGeneration.from_pretrained(\r\n a_\t\t\t\t,\t\t\t\t\toutput_loading_info=a_\t\t)\r\n\r\n lowerCAmelCase_ :Optional[int] \t\t\t\t\t= [\"\"\"key_proj\"\"\", \"\"\"value_proj\"\"\", \"\"\"query_proj\"\"\"]\r\n\r\n lowerCAmelCase_ :Tuple \t\t\t\t\t= {\r\n \"\"\"self_attn\"\"\": \"\"\"ngram_self_attn\"\"\",\r\n \"\"\"cross_attn\"\"\": \"\"\"encoder_attn\"\"\",\r\n \"\"\"cross_attn_layer_norm\"\"\": \"\"\"encoder_attn_layer_norm\"\"\",\r\n \"\"\"feed_forward_layer_norm\"\"\": \"\"\"final_layer_norm\"\"\",\r\n \"\"\"feed_forward\"\"\": \"\"\"\"\"\",\r\n \"\"\"intermediate\"\"\": \"\"\"fc1\"\"\",\r\n \"\"\"output\"\"\": \"\"\"fc2\"\"\",\r\n \"\"\"key_proj\"\"\": \"\"\"k_proj\"\"\",\r\n \"\"\"query_proj\"\"\": \"\"\"q_proj\"\"\",\r\n \"\"\"value_proj\"\"\": \"\"\"v_proj\"\"\",\r\n \"\"\"word_embeddings\"\"\": \"\"\"embed_tokens\"\"\",\r\n \"\"\"embeddings_layer_norm\"\"\": \"\"\"emb_layer_norm\"\"\",\r\n \"\"\"relative_pos_embeddings\"\"\": \"\"\"relative_linear\"\"\",\r\n \"\"\"ngram_embeddings\"\"\": \"\"\"ngram_input_embed\"\"\",\r\n \"\"\"position_embeddings\"\"\": \"\"\"embed_positions\"\"\",\r\n }\r\n\r\n for key in loading_info[\"missing_keys\"]:\r\n lowerCAmelCase_ :Optional[Any] \t\t\t\t\t= key.split(\"\"\".\"\"\"\t\t)\r\n\r\n if attributes[0] == \"lm_head\":\r\n lowerCAmelCase_ :List[Any] \t\t\t\t\t= prophet\r\n lowerCAmelCase_ :Any \t\t\t\t\t= prophet_old\r\n else:\r\n lowerCAmelCase_ :str \t\t\t\t\t= prophet.prophetnet\r\n lowerCAmelCase_ :Optional[Any] \t\t\t\t\t= prophet_old.model\r\n\r\n lowerCAmelCase_ :str \t\t\t\t\t= False\r\n for attribute in attributes:\r\n if attribute in mapping:\r\n lowerCAmelCase_ :Optional[Any] \t\t\t\t\t= mapping[attribute]\r\n if not hasattr(a_\t\t\t\t,\t\t\t\t\ta_\t\t) and len(a_\t\t) > 0:\r\n lowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= attribute\r\n elif hasattr(a_\t\t\t\t,\t\t\t\t\ta_\t\t):\r\n lowerCAmelCase_ :Optional[int] \t\t\t\t\t= attribute\r\n\r\n if attribute == \"weight\":\r\n assert old_model.weight.shape == model.weight.shape, \"Shapes have to match!\"\r\n lowerCAmelCase_ :Dict \t\t\t\t\t= old_model.weight\r\n logger.info(f\"\"\"{attribute} is initialized.\"\"\"\t\t)\r\n lowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= True\r\n break\r\n elif attribute == \"bias\":\r\n assert old_model.bias.shape == model.bias.shape, \"Shapes have to match!\"\r\n lowerCAmelCase_ :str \t\t\t\t\t= old_model.bias\r\n logger.info(f\"\"\"{attribute} is initialized\"\"\"\t\t)\r\n lowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= True\r\n break\r\n elif attribute in special_keys and hasattr(a_\t\t\t\t,\t\t\t\t\t\"\"\"in_proj_weight\"\"\"\t\t):\r\n lowerCAmelCase_ :Optional[Any] \t\t\t\t\t= old_model.in_proj_weight.shape[0] // 3\r\n lowerCAmelCase_ :str \t\t\t\t\t= getattr(a_\t\t\t\t,\t\t\t\t\ta_\t\t)\r\n assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, \"Shapes have to match\"\r\n assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, \"Shapes have to match\"\r\n if attribute == \"query_proj\":\r\n lowerCAmelCase_ :int \t\t\t\t\t= nn.Parameter(old_model.in_proj_weight[:embed_dim, :]\t\t)\r\n lowerCAmelCase_ :List[str] \t\t\t\t\t= 
nn.Parameter(old_model.in_proj_bias[:embed_dim]\t\t)\r\n\r\n elif attribute == \"key_proj\":\r\n lowerCAmelCase_ :Dict \t\t\t\t\t= nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :]\t\t)\r\n lowerCAmelCase_ :Dict \t\t\t\t\t= nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim]\t\t)\r\n elif attribute == \"value_proj\":\r\n lowerCAmelCase_ :Tuple \t\t\t\t\t= nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :]\t\t)\r\n lowerCAmelCase_ :Dict \t\t\t\t\t= nn.Parameter(old_model.in_proj_bias[2 * embed_dim :]\t\t)\r\n lowerCAmelCase_ :Any \t\t\t\t\t= True\r\n break\r\n elif attribute == \"position_embeddings\":\r\n assert (\r\n model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]\r\n ), \"Hidden size has to match\"\r\n assert model.position_embeddings.weight.shape[0] == 5_1_2, \"We want 512 position_embeddings.\"\r\n lowerCAmelCase_ :Dict \t\t\t\t\t= nn.Parameter(old_model.embed_positions.weight[:5_1_2, :]\t\t)\r\n lowerCAmelCase_ :List[Any] \t\t\t\t\t= True\r\n break\r\n\r\n if attribute.isdigit():\r\n lowerCAmelCase_ :Optional[Any] \t\t\t\t\t= model[int(a_\t\t)]\r\n lowerCAmelCase_ :Any \t\t\t\t\t= old_model[int(a_\t\t)]\r\n else:\r\n lowerCAmelCase_ :Tuple \t\t\t\t\t= getattr(a_\t\t\t\t,\t\t\t\t\ta_\t\t)\r\n\r\n if old_attribute == \"\":\r\n lowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= old_model\r\n else:\r\n if not hasattr(a_\t\t\t\t,\t\t\t\t\ta_\t\t):\r\n raise ValueError(f\"\"\"{old_model} does not have {old_attribute}\"\"\"\t\t)\r\n lowerCAmelCase_ :Dict \t\t\t\t\t= getattr(a_\t\t\t\t,\t\t\t\t\ta_\t\t)\r\n\r\n if not is_key_init:\r\n raise ValueError(f\"\"\"{key} was not correctly initialized!\"\"\"\t\t)\r\n\r\n print(f\"\"\"Saving model to {pytorch_dump_folder_path}\"\"\"\t\t)\r\n prophet.save_pretrained(a_\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n __UpperCAmelCase\t\t\t\t\t\t\t= argparse.ArgumentParser()\r\n # Required parameters\r\n parser.add_argument(\r\n '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'\r\n )\r\n parser.add_argument(\r\n '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'\r\n )\r\n __UpperCAmelCase\t\t\t\t\t\t\t= parser.parse_args()\r\n convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":363,"string":"363"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport itertools\r\nimport math\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif 1 < number < 4:\r\n\t\t\t\t\t\t# 2 and 3 are primes\r\n\t\t\t\t\t\treturn True\r\n\t\t\telif number < 2 or number % 2 == 0 or number % 3 == 0:\r\n\t\t\t\t\t\t# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t# All primes number are in format of 6k +/- 1\r\n\t\t\tfor i in range(5\t\t\t\t,\t\t\t\t\tint(math.sqrt(lowercase__\t\t) + 1\t\t)\t\t\t\t,\t\t\t\t\t6\t\t):\r\n\t\t\t\t\t\tif number % i == 0 or number % (i + 2) == 0:\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\treturn True\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Dict:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\twhile True:\r\n\t\t\t\t\t\tif 
is_prime(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tyield num\r\n\t\t\t\t\t\tnum += 1\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 1_0_0_0_1\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn next(itertools.islice(prime_generator()\t\t\t\t,\t\t\t\t\tnth - 1\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":674,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list[int] ,\t\t\t\t\tlowercase__\t\t: int\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= len(__a\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [[False] * (required_sum + 1) for _ in range(arr_len + 1\t\t)]\r\n\r\n\t\t\t# for each arr value, a sum of zero(0) can be formed by not taking any element\r\n\t\t\t# hence True/1\r\n\t\t\tfor i in range(arr_len + 1\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= True\r\n\r\n\t\t\t# sum is not zero and set is empty then false\r\n\t\t\tfor i in range(1 ,\t\t\t\t\trequired_sum + 1\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= False\r\n\r\n\t\t\tfor i in range(1 ,\t\t\t\t\tarr_len + 1\t\t):\r\n\t\t\t\t\t\tfor j in range(1 ,\t\t\t\t\trequired_sum + 1\t\t):\r\n\t\t\t\t\t\t\t\t\tif arr[i - 1] > j:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= subset[i - 1][j]\r\n\t\t\t\t\t\t\t\t\tif arr[i - 1] <= j:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]\r\n\r\n\t\t\treturn subset[arr_len][required_sum]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":364,"string":"364"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 5_0\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [1] * (length + 1)\r\n\r\n\t\t\tfor row_length in range(3\t\t\t\t,\t\t\t\t\tlength + 1\t\t):\r\n\t\t\t\t\t\tfor block_length in range(3\t\t\t\t,\t\t\t\t\trow_length + 1\t\t):\r\n\t\t\t\t\t\t\t\t\tfor block_start in range(row_length - block_length\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tways_number[row_length] += ways_number[\r\n\t\t\t\t\t\t\t\t\t\t\t\t row_length - block_start - block_length - 1\r\n\t\t\t\t\t\t\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t\t\t\tways_number[row_length] += 1\r\n\r\n\t\t\treturn ways_number[length]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":675,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = \"input.txt\"\t\t) -> Any:\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n with open(os.path.join(os.path.dirname(lowerCAmelCase__\t\t)\t\t\t\t,\t\t\t\t\tlowerCAmelCase__\t\t)\t\t) as input_file:\r\n 
lowerCAmelCase_ :List[Any] \t\t\t\t\t= [\r\n [int(lowerCAmelCase__\t\t) for element in line.split(\"\"\",\"\"\"\t\t)]\r\n for line in input_file.readlines()\r\n ]\r\n\r\n lowerCAmelCase_ :int \t\t\t\t\t= len(lowerCAmelCase__\t\t)\r\n lowerCAmelCase_ :str \t\t\t\t\t= len(matrix[0]\t\t)\r\n\r\n lowerCAmelCase_ :List[str] \t\t\t\t\t= [[-1 for _ in range(lowerCAmelCase__\t\t)] for _ in range(lowerCAmelCase__\t\t)]\r\n for i in range(lowerCAmelCase__\t\t):\r\n lowerCAmelCase_ :str \t\t\t\t\t= matrix[i][0]\r\n\r\n for j in range(1\t\t\t\t,\t\t\t\t\tlowerCAmelCase__\t\t):\r\n for i in range(lowerCAmelCase__\t\t):\r\n lowerCAmelCase_ :str \t\t\t\t\t= minimal_path_sums[i][j - 1] + matrix[i][j]\r\n\r\n for i in range(1\t\t\t\t,\t\t\t\t\tlowerCAmelCase__\t\t):\r\n lowerCAmelCase_ :Any \t\t\t\t\t= min(\r\n minimal_path_sums[i][j]\t\t\t\t,\t\t\t\t\tminimal_path_sums[i - 1][j] + matrix[i][j]\t\t)\r\n\r\n for i in range(rows - 2\t\t\t\t,\t\t\t\t\t-1\t\t\t\t,\t\t\t\t\t-1\t\t):\r\n lowerCAmelCase_ :Optional[Any] \t\t\t\t\t= min(\r\n minimal_path_sums[i][j]\t\t\t\t,\t\t\t\t\tminimal_path_sums[i + 1][j] + matrix[i][j]\t\t)\r\n\r\n return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":365,"string":"365"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/\r\n\r\nimport gc\r\nimport random\r\nimport tempfile\r\nimport unittest\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom PIL import Image\r\nfrom transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer\r\n\r\nfrom diffusers import (\r\n AutoencoderKL,\r\n ControlNetModel,\r\n DDIMScheduler,\r\n StableDiffusionControlNetImgaImgPipeline,\r\n UNetaDConditionModel,\r\n)\r\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel\r\nfrom diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device\r\nfrom diffusers.utils.import_utils import is_xformers_available\r\nfrom diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu\r\n\r\nfrom ..pipeline_params import (\r\n IMAGE_TO_IMAGE_IMAGE_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_PARAMS,\r\n)\r\nfrom ..test_pipelines_common import (\r\n PipelineKarrasSchedulerTesterMixin,\r\n PipelineLatentTesterMixin,\r\n PipelineTesterMixin,\r\n)\r\n\r\n\r\nenable_full_determinism()\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS.union({\"control_image\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 
64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= floats_tensor(control_image.shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t 
\"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfrozenset([]\t\t\t\t\t\t\t) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\r\n\t\t\t\t\t\tdef init_weights(__A\t):\r\n\t\t\t\t\t\t\t\t\tif isinstance(__A\t, torch.nn.Convad\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\ttorch.nn.init.normal(m.weight\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tm.bias.data.fill_(1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 
AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= MultiControlNetModel([controlneta, controlneta]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= floats_tensor(control_image[0].shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 
1_0.0\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 4\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.1\t, control_guidance_end=0.2\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=[0.1, 0.3]\t, control_guidance_end=[0.2, 0.7]\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.4\t, control_guidance_end=[0.5, 0.8]\t)[0]\r\n\r\n\t\t\t\t\t\t# make sure that all outputs are different\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdir:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# save_pretrained is not implemented for Multi-ControlNet\r\n\t\t\t\t\t\t\t\t\t\t\t\tpipe.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\texcept NotImplementedError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n@slow\r\n@require_torch_gpu\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tsuper().tearDown()\r\n\t\t\t\t\t\tgc.collect()\r\n\t\t\t\t\t\ttorch.cuda.empty_cache()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 
ControlNetModel.from_pretrained(\"\"\"lllyasviel/sd-controlnet-canny\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= StableDiffusionControlNetImgaImgPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, safety_checker=__A\t, controlnet=__A\t)\r\n\t\t\t\t\t\tpipe.enable_model_cpu_offload()\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.Generator(device=\"\"\"cpu\"\"\"\t).manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"evil space-punk bird\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png\"\"\"\t).resize((512, 512)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png\"\"\"\t).resize((512, 512)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(\r\n\t\t\t\t\t\t __A\t, __A\t, control_image=__A\t, generator=__A\t, output_type=\"\"\"np\"\"\"\t, num_inference_steps=50\t, strength=0.6\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= output.images[0]\r\n\r\n\t\t\t\t\t\tassert image.shape == (512, 512, 3)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= load_numpy(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy\"\"\"\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(expected_image - image\t).max() < 9E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":676,"cells":{"code":{"kind":"string","value":"\r\"\"\"simple docstring\"\"\"\r\r\r\r\r\r\rimport unittest\r\rfrom transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding\rfrom transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow\r\rfrom ...test_tokenization_common import TokenizerTesterMixin\r\r\r\r\r@require_tokenizers\r@require_sentencepiece\r@slow # see https://github.com/huggingface/transformers/issues/11457\rclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tBarthezTokenizer\r\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\tBarthezTokenizerFast\r\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tTrue\r\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tTrue\r\r\r\r\r\r\r\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\t\t\t\t\t\tsuper().setUp()\r\r\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= BarthezTokenizerFast.from_pretrained(\"\"\"moussaKam/mbarthez\"\"\"\t)\r\t\t\t\t\t\ttokenizer.save_pretrained(self.tmpdirname\t)\r\t\t\t\t\t\ttokenizer.save_pretrained(self.tmpdirname\t, legacy_format=lowerCAmelCase__\t)\r\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer\r\r\r\r\r\r\r\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= \"\"\r\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 1\r\r\t\t\t\t\t\tself.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__\t)\t, lowerCAmelCase__\t)\r\t\t\t\t\t\tself.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__\t)\t, lowerCAmelCase__\t)\r\r\r\r\r\r\r\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) 
-> Optional[int]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 10_1122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_1122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
"},"code_codestyle":{"kind":"number","value":366,"string":"366"},"style_context":{"kind":"string","value":"
"""simple docstring"""

from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 5_0257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
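    # Note on the forward pass that follows: the CLIP-style prefix is projected
    # through `encode_prefix`/`decode_prefix` and concatenated in front of the
    # text-token embeddings, so the GPT-2 backbone attends to the prefix as
    # ordinary positions; `get_dummy_token` pads the labels by `prefix_length`
    # so they stay aligned with the concatenated sequence.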
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # stopped beams must not accumulate probability mass
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":677,"cells":{"code":{"kind":"string","value":"
"""simple docstring"""


def split(string: str, separator: str = " ") -> list:
    """
    Will split the string up into all the values separated by the separator.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    """
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
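# Behavioural note (a sketch, mirroring the loop above): the function walks the
# string once and cuts at every separator, so a trailing separator yields no
# trailing empty element -- split("a,b,", ",") returns ['a', 'b'], not ['a', 'b', ''].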
"},"code_codestyle":{"kind":"number","value":367,"string":"367"},"style_context":{"kind":"string","value":"
"""simple docstring"""

import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":678,"cells":{"code":{"kind":"string","value":"
"""simple docstring"""

import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=2_4000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
"},"code_codestyle":{"kind":"number","value":368,"string":"368"},"style_context":{"kind":"string","value":"
"""simple docstring"""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":679,"cells":{"code":{"kind":"string","value":"
"""simple docstring"""


class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
"},"code_codestyle":{"kind":"number","value":369,"string":"369"},"style_context":{"kind":"string","value":"
"""simple docstring"""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":680,"cells":{"code":{"kind":"string","value":"
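# The tokenizer below mirrors fairseq's convention: four control tokens
# (<s>, <pad>, </s>, <unk>) sit ahead of the SentencePiece pieces, which is why
# every raw SentencePiece id is shifted by `fairseq_offset` in the methods below.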
"""simple docstring"""

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |    2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------- | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>'  | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>'  | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"},"code_codestyle":{"kind":"number","value":370,"string":"370"},"style_context":{"kind":"string","value":"
"""simple docstring"""

alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a Rabin-Karp rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False
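# Rolling-hash sanity check (a sketch of the recurrence above): for a length-2
# pattern, modulus_power is 256, and hash("ab") = ord("a") * 256 + ord("b");
# sliding the window to "bc" subtracts ord("a") * modulus_power, multiplies the
# remainder by 256, and adds ord("c") -- all modulo `modulus`.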
(\r\n\t\t\t\t\t\t (text_hash - ord(text[i]\t\t) * modulus_power) * alphabet_size\r\n\t\t\t\t\t\t + ord(text[i + p_len]\t\t)\r\n\t\t\t\t\t\t) % modulus\r\n\t\t\treturn False\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> None:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"abc1abc12\"\"\"\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"alskfjaldsabc1abc1abc12k23adsfabcabc\"\"\"\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"alskfjaldsk23adsfabcabc\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t) and not rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 2)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"ABABX\"\"\"\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"ABABZABABYABABX\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 3)\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"AAAB\"\"\"\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= \"\"\"ABAAAAAB\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 4)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"abcdabcy\"\"\"\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= \"\"\"abcxabcdabxabcdabcdabcy\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\t\t\t# Test 5)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lรผ\"\"\"\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lรผsai\"\"\"\r\n\t\t\tassert rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"Lue\"\"\"\r\n\t\t\tassert not rabin_karp(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\t\t\tprint(\"\"\"Success.\"\"\"\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\ttest_rabin_karp()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":681,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport unittest\r\n\r\nfrom transformers import XLMConfig, is_torch_available\r\nfrom transformers.testing_utils import require_torch, slow, torch_device\r\n\r\nfrom ...generation.test_utils import GenerationTesterMixin\r\nfrom ...test_configuration_common import ConfigTester\r\nfrom ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask\r\nfrom ...test_pipeline_mixin import PipelineTesterMixin\r\n\r\n\r\nif is_torch_available():\r\n\t\t\t\timport torch\r\n\r\n\t\t\t\tfrom transformers import (\r\n\t\t\t\t XLMForMultipleChoice,\r\n\t\t\t\t XLMForQuestionAnswering,\r\n\t\t\t\t XLMForQuestionAnsweringSimple,\r\n\t\t\t\t XLMForSequenceClassification,\r\n\t\t\t\t XLMForTokenClassification,\r\n\t\t\t\t XLMModel,\r\n\t\t\t\t XLMWithLMHeadModel,\r\n\t\t\t\t)\r\n\t\t\t\tfrom transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=13\t, __A=7\t, __A=True\t, __A=True\t, __A=True\t, __A=True\t, __A=True\t, __A=False\t, __A=False\t, __A=False\t, __A=2\t, __A=99\t, __A=0\t, __A=32\t, __A=5\t, __A=4\t, __A=0.1\t, __A=0.1\t, __A=512\t, __A=2\t, __A=0.0_2\t, __A=2\t, __A=4\t, __A=\"last\"\t, __A=True\t, __A=None\t, __A=0\t, ) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 
parent\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= batch_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= is_training\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= use_input_lengths\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= use_token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= use_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= gelu_activation\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= sinusoidal_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= causal\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= asm\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= n_langs\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= vocab_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= n_special\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= hidden_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= num_hidden_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= num_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= hidden_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= attention_probs_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= max_position_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= type_sequence_label_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= initializer_range\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= num_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= num_choices\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= summary_type\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= use_proj\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= scope\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= bos_token_id\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.vocab_size\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= random_attention_mask([self.batch_size, self.seq_length]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= None\r\n\t\t\t\t\t\tif self.use_input_lengths:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= (\r\n\t\t\t\t\t\t\t\t\t ids_tensor([self.batch_size]\t, vocab_size=2\t) + self.seq_length - 2\r\n\t\t\t\t\t\t\t\t\t) # small variation of seq_length\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= None\r\n\t\t\t\t\t\tif self.use_token_type_ids:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.n_langs\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= None\r\n\t\t\t\t\t\tif self.use_labels:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= ids_tensor([self.batch_size]\t, self.type_sequence_label_size\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.num_labels\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ids_tensor([self.batch_size]\t, 2\t).float()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= ids_tensor([self.batch_size]\t, self.num_choices\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.get_config()\r\n\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t config,\r\n\t\t\t\t\t\t input_ids,\r\n\t\t\t\t\t\t 
token_type_ids,\r\n\t\t\t\t\t\t input_lengths,\r\n\t\t\t\t\t\t sequence_labels,\r\n\t\t\t\t\t\t token_labels,\r\n\t\t\t\t\t\t is_impossible_labels,\r\n\t\t\t\t\t\t choice_labels,\r\n\t\t\t\t\t\t input_mask,\r\n\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\treturn XLMConfig(\r\n\t\t\t\t\t\t vocab_size=self.vocab_size\t, n_special=self.n_special\t, emb_dim=self.hidden_size\t, n_layers=self.num_hidden_layers\t, n_heads=self.num_attention_heads\t, dropout=self.hidden_dropout_prob\t, attention_dropout=self.attention_probs_dropout_prob\t, gelu_activation=self.gelu_activation\t, sinusoidal_embeddings=self.sinusoidal_embeddings\t, asm=self.asm\t, causal=self.causal\t, n_langs=self.n_langs\t, max_position_embeddings=self.max_position_embeddings\t, initializer_range=self.initializer_range\t, summary_type=self.summary_type\t, use_proj=self.use_proj\t, num_labels=self.num_labels\t, bos_token_id=self.bos_token_id\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, ) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= XLMModel(config=_a\t)\r\n\t\t\t\t\t\tmodel.to(_a\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model(_a\t, lengths=_a\t, langs=_a\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= model(_a\t, langs=_a\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model(_a\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.last_hidden_state.shape\t, (self.batch_size, self.seq_length, self.hidden_size)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, ) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= XLMWithLMHeadModel(_a\t)\r\n\t\t\t\t\t\tmodel.to(_a\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= model(_a\t, token_type_ids=_a\t, labels=_a\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.loss.shape\t, ()\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.logits.shape\t, (self.batch_size, self.seq_length, self.vocab_size)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, ) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= XLMForQuestionAnsweringSimple(_a\t)\r\n\t\t\t\t\t\tmodel.to(_a\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= model(_a\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model(_a\t, start_positions=_a\t, end_positions=_a\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs\r\n\t\t\t\t\t\tself.parent.assertEqual(result.start_logits.shape\t, (self.batch_size, self.seq_length)\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.end_logits.shape\t, (self.batch_size, self.seq_length)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, ) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= XLMForQuestionAnswering(_a\t)\r\n\t\t\t\t\t\tmodel.to(_a\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model(_a\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= model(\r\n\t\t\t\t\t\t _a\t, start_positions=_a\t, end_positions=_a\t, cls_index=_a\t, is_impossible=_a\t, p_mask=_a\t, 
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [14, 447] * 10  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why.
        # Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
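# Standalone sketch of the model under test (not part of the test suite; it
# assumes `transformers` is installed and downloads the checkpoint on first
# use, so it only runs when this file is executed directly):
if __name__ == "__main__":
    from transformers import XLMModel, XLMTokenizer

    _tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
    _model = XLMModel.from_pretrained("xlm-mlm-en-2048")
    _inputs = _tokenizer("Hello, world!", return_tensors="pt")
    _outputs = _model(**_inputs)
    print(_outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 2048])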
# ----------------------------------------------------------------------------
# Accelerate example: training with LocalSGD, which synchronizes model
# parameters every K batches. It is different, but complementary, to gradient
# accumulation. Trains a BERT base model on GLUE MRPC and works on single
# CPU/GPU, multi-GPU (PyTorch distributed), (multi) TPU, and fp16/fp32. See
# https://github.com/huggingface/accelerate/tree/main/examples for how to run
# it in each mode.
# ----------------------------------------------------------------------------

import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
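# Typical invocations for the script above (a sketch; the file name
# `local_sgd.py` and the flag values are examples, not requirements):
#
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2
#   python local_sgd.py --cpu    # single-process CPU run
#
# LocalSGD only averages parameters across workers every `local_sgd_steps`
# optimizer steps, so it reduces communication relative to per-step all-reduce.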
# ----------------------------------------------------------------------------
# Benchmark helpers for the `datasets` ArrowWriter: generate dummy examples for
# a feature specification and materialize them as an on-disk Arrow dataset.
# ----------------------------------------------------------------------------

import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
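# Quick self-check for the generators above (a sketch; the feature spec and the
# temporary path are made up for illustration, and it only runs when this file
# is executed directly):
if __name__ == "__main__":
    import os
    import tempfile

    _features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
    with tempfile.TemporaryDirectory() as _tmp_dir:
        _dataset = generate_example_dataset(os.path.join(_tmp_dir, "dataset.arrow"), _features, num_examples=10)
        print(len(_dataset), _dataset.features)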
# ----------------------------------------------------------------------------
# Accelerate test script: checkpointing (save_state/load_state) with optional
# DeepSpeed, using `DummyOptim`/`DummyScheduler` when the optimizer/scheduler
# are defined inside the DeepSpeed config.
# ----------------------------------------------------------------------------

import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
        load_from_cache_file=False,
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
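# Note: newer accelerate releases expose `Accelerator.gather_for_metrics`, which
# performs the duplicate trimming done manually above. A sketch of the
# equivalent loop body (assuming an accelerate version that ships it):
#
#   predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#   metric.add_batch(predictions=predictions, references=references)
#
# This script keeps the manual version because it is exactly what it tests.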
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)


if __name__ == "__main__":
    main()
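# How a checkpoint/resume round trip with this script looks (a sketch; the
# script name, paths, and the accelerate config file are placeholders):
#
#   accelerate launch --config_file ds_config.yaml test_checkpointing.py --output_dir ckpts --partial_train_epoch 1
#   accelerate launch --config_file ds_config.yaml test_checkpointing.py --output_dir ckpts --resume_from_checkpoint ckpts/epoch_0
#
# The second run reloads `ckpts/epoch_0`, re-evaluates, and checks accuracy and
# learning rates against `ckpts/state_0.json` before returning.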
# ----------------------------------------------------------------------------
# Accelerate tests for the `Accelerator` object itself.
# ----------------------------------------------------------------------------

import json
import os
import tempfile
from unittest.mock import patch

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment


def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)


class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _ = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
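    # What `get_signature`/`load_random_weights` express, outside the test
    # class (a self-contained sketch):
    #
    #   model = torch.nn.Linear(2, 4)
    #   before = get_signature(model)   # scalar fingerprint of the weights
    #   load_random_weights(model)      # re-initialize the weights in place
    #   assert abs(before - get_signature(model)) > 1e-3
    #
    # A checkpoint restore should then bring the fingerprint back within 1e-3.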
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
            model.tie_weights()
            device_map = infer_auto_device_map(model)
            device_map["lm_head"] = "cpu"

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
            model.tie_weights()
            device_map = infer_auto_device_map(model)
            device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
            device_map = infer_auto_device_map(model)
            device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
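# The 8-bit loading pattern exercised by the tests above, as it would appear in
# user code (a sketch; it requires `bitsandbytes` and a CUDA GPU, and the
# checkpoint download happens on first use):
#
#   from transformers import AutoModelForCausalLM
#
#   model = AutoModelForCausalLM.from_pretrained(
#       "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map="auto"
#   )
#
# `Accelerator.prepare` accepts such a model only when it lives entirely on one
# device; the error-path tests above check the mixed-device cases.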
# ----------------------------------------------------------------------------
# Accelerate DeepSpeed utilities: a queryable wrapper around a DeepSpeed config
# plus thin wrappers used when DeepSpeed drives the training step.
# ----------------------------------------------------------------------------

import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
exists\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ds_key_long.split(\"\"\".\"\"\"\t)\r\n\t\t\t\t\t\tfor node in nodes:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= config\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= config.get(__A\t)\r\n\t\t\t\t\t\t\t\t\tif config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tif must_exist:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(f\"\"\"Can't find {ds_key_long} entry in the config: {self.config}\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn\r\n\r\n # if found remove it\r\n\t\t\t\t\t\tif parent_config is not None:\r\n\t\t\t\t\t\t\t\t\tparent_config.pop(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.get_value(__A\t)\r\n\t\t\t\t\t\treturn False if value is None else bool(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_value(__A\t)\r\n\t\t\t\t\t\treturn False if value is None else not bool(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\treturn self._stage == 2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn self._stage == 3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\treturn self._offload\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= engine\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, **__A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# runs backpropagation and handles mixed precision\r\n\t\t\t\t\t\tself.engine.backward(__A\t, **__A\t)\r\n\r\n\t\t\t\t\t\t# Deepspeed's `engine.step` performs the following operations:\r\n\t\t\t\t\t\t# - gradient accumulation check\r\n\t\t\t\t\t\t# - gradient clipping\r\n\t\t\t\t\t\t# - optimizer step\r\n\t\t\t\t\t\t# - zero grad\r\n\t\t\t\t\t\t# - checking overflow\r\n\t\t\t\t\t\t# - lr_scheduler step (only if engine.lr_scheduler is not None)\r\n\t\t\t\t\t\tself.engine.step()\r\n\t\t\t\t\t\t# and this plugin overrides the above calls with no-ops when Accelerate runs under\r\n\t\t\t\t\t\t# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple\r\n\t\t\t\t\t\t# training loop that works transparently under many training regimes.\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tsuper().__init__(__A\t, device_placement=__A\t, scaler=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= hasattr(self.optimizer\t, \"\"\"overflow\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. 
Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tif self.__has_overflow__:\r\n\t\t\t\t\t\t\t\t\treturn self.optimizer.overflow\r\n\t\t\t\t\t\treturn False\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tsuper().__init__(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tpass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=0.0_0_1\t, __A=0\t, **__A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= params\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= lr\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= weight_decay\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= kwargs\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=None\t, __A=0\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= optimizer\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= total_num_steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= warmup_num_steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= kwargs\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":684,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport math\r\nimport sys\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= ''\r\n\t\t\ttry:\r\n\t\t\t\t\t\twith open(_A\t\t\t\t,\t\t\t\t\t\"\"\"rb\"\"\"\t\t) as binary_file:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= binary_file.read()\r\n\t\t\t\t\t\tfor dat in data:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= f\"\"\"{dat:08b}\"\"\"\r\n\t\t\t\t\t\t\t\t\tresult += curr_byte\r\n\t\t\t\t\t\treturn result\r\n\t\t\texcept OSError:\r\n\t\t\t\t\t\tprint(\"\"\"File not accessible\"\"\"\t\t)\r\n\t\t\t\t\t\tsys.exit()\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {'0': '0', '1': '1'}\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= '', ''\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= len(_A\t\t)\r\n\r\n\t\t\tfor i in range(len(_A\t\t)\t\t):\r\n\t\t\t\t\t\tcurr_string += data_bits[i]\r\n\t\t\t\t\t\tif curr_string not in lexicon:\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= lexicon[curr_string]\r\n\t\t\t\t\t\tresult += last_match_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= last_match_id + '0'\r\n\r\n\t\t\t\t\t\tif math.loga(_A\t\t).is_integer():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= {}\r\n\t\t\t\t\t\t\t\t\tfor curr_key in list(_A\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= lexicon.pop(_A\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= new_lex\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= last_match_id + 
'1'\r\n\t\t\t\t\t\tindex += 1\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= ''\r\n\t\t\treturn result\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Dict\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 8\r\n\t\t\ttry:\r\n\t\t\t\t\t\twith open(_A\t\t\t\t,\t\t\t\t\t\"\"\"wb\"\"\"\t\t) as opened_file:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= [\r\n\t\t\t\t\t\t\t\t\t to_write[i : i + byte_length]\r\n\t\t\t\t\t\t\t\t\t for i in range(0\t\t\t\t,\t\t\t\t\tlen(_A\t\t)\t\t\t\t,\t\t\t\t\t_A\t\t)\r\n\t\t\t\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t\t\t\tif len(result_byte_array[-1]\t\t) % byte_length == 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\tresult_byte_array.append(\"\"\"10000000\"\"\"\t\t)\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tresult_byte_array[-1] += \"1\" + \"0\" * (\r\n\t\t\t\t\t\t\t\t\t\t\t\t byte_length - len(result_byte_array[-1]\t\t) - 1\r\n\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\tfor elem in result_byte_array[:-1]:\r\n\t\t\t\t\t\t\t\t\t\t\t\topened_file.write(int(_A\t\t\t\t,\t\t\t\t\t2\t\t).to_bytes(1\t\t\t\t,\t\t\t\t\tbyteorder=\"\"\"big\"\"\"\t\t)\t\t)\r\n\t\t\texcept OSError:\r\n\t\t\t\t\t\tprint(\"\"\"File not accessible\"\"\"\t\t)\r\n\t\t\t\t\t\tsys.exit()\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Tuple\t\t) -> Any:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0\r\n\t\t\tfor letter in data_bits:\r\n\t\t\t\t\t\tif letter == \"1\":\r\n\t\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\tcounter += 1\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= data_bits[counter:]\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= data_bits[counter + 1 :]\r\n\t\t\treturn data_bits\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Tuple\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict\t\t) -> Union[str, Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= read_file_binary(_A\t\t)\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= remove_prefix(_A\t\t)\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= decompress_data(_A\t\t)\r\n\t\t\twrite_file_binary(_A\t\t\t\t,\t\t\t\t\t_A\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tcompress(sys.argv[1], sys.argv[2])\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":352,"string":"352"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Copyright 2023 The HuggingFace Inc. team. 
All rights reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom ..models.clipseg import CLIPSegForImageSegmentation\r\nfrom ..utils import is_vision_available, requires_backends\r\nfrom .base import PipelineTool\r\n\r\n\r\nif is_vision_available():\r\n\t\t\t\tfrom PIL import Image\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t \"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.\"\r\n\t\t\t \"It takes two arguments named `image` which should be the original image, and `label` which should be a text \"\r\n\t\t\t \"describing the elements that should be identified in the segmentation mask. The tool returns the mask.\"\r\n\t\t\t)\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t\"CIDAS/clipseg-rd64-refined\"\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\t\"image_segmenter\"\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tCLIPSegForImageSegmentation\r\n\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t[\"image\", \"text\"]\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t[\"image\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, *__A\t, **__A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\trequires_backends(self\t, [\"\"\"vision\"\"\"]\t)\r\n\t\t\t\t\t\tsuper().__init__(*__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn self.pre_processor(text=[label]\t, images=[image]\t, padding=__A\t, return_tensors=\"\"\"pt\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.model(**__A\t).logits\r\n\t\t\t\t\t\treturn logits\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= outputs.cpu().detach().numpy()\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 0\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 1\r\n\t\t\t\t\t\treturn Image.fromarray((array * 255).astype(np.uinta\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":685,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nimport typing\r\nfrom collections.abc import Iterable\r\n\r\nimport numpy as np\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007\r\n__UpperCAmelCase\t\t\t\t\t\t\t= typing.Union[np.floataa, int, float] # noqa: UP007\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Vector\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Vector\t\t) ->
VectorOut:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn np.sqrt(np.sum((np.asarray(lowercase__\t\t) - np.asarray(lowercase__\t\t)) ** 2\t\t)\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Vector\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Vector\t\t) -> VectorOut:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn sum((va - va) ** 2 for va, va in zip(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t) ** (1 / 2)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t\t\t\t_snake_case\t( ) -> None:\r\n\r\n\r\n\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tfrom timeit import timeit\r\n\r\n\t\t\t\t\t\tprint(\"\"\"Without Numpy\"\"\"\t\t)\r\n\t\t\t\t\t\tprint(\r\n\t\t\t\t\t\t timeit(\r\n\t\t\t\t\t\t \"\"\"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])\"\"\"\t\t\t\t,\t\t\t\t\tnumber=1_0_0_0_0\t\t\t\t,\t\t\t\t\tglobals=globals()\t\t\t\t,\t\t\t\t\t)\t\t)\r\n\t\t\t\t\t\tprint(\"\"\"With Numpy\"\"\"\t\t)\r\n\t\t\t\t\t\tprint(\r\n\t\t\t\t\t\t timeit(\r\n\t\t\t\t\t\t \"\"\"euclidean_distance([1, 2, 3], [4, 5, 6])\"\"\"\t\t\t\t,\t\t\t\t\tnumber=1_0_0_0_0\t\t\t\t,\t\t\t\t\tglobals=globals()\t\t\t\t,\t\t\t\t\t)\t\t)\r\n\r\n\r\n\r\n\r\n\t\t\tbenchmark()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":353,"string":"353"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: list\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif index == number_of_items:\r\n\t\t\t\t\t\treturn 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= knapsack(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tindex + 1\t\t)\r\n\t\t\tif weights[index] <= max_weight:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= values[index] + knapsack(\r\n\t\t\t\t\t\t lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tmax_weight - weights[index]\t\t\t\t,\t\t\t\t\tindex + 1\t\t)\r\n\t\t\treturn max(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":686,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Any\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict=None\t\t) -> List[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (path or []) + [u]\r\n\t\t\tfor v in graph[u]:\r\n\t\t\t\t\t\tif visited_edge[u][v] is False:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= True, True\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= dfs(snake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t)\r\n\t\t\treturn path\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( 
lowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict\t\t) -> Optional[int]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= -1\r\n\t\t\tfor i in range(snake_case_\t\t):\r\n\t\t\t\t\t\tif i not in graph.keys():\r\n\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\tif len(graph[i]\t\t) % 2 == 1:\r\n\t\t\t\t\t\t\t\t\todd_degree_nodes += 1\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= i\r\n\t\t\tif odd_degree_nodes == 0:\r\n\t\t\t\t\t\treturn 1, odd_node\r\n\t\t\tif odd_degree_nodes == 2:\r\n\t\t\t\t\t\treturn 2, odd_node\r\n\t\t\treturn 3, odd_node\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]\t\t) -> List[str]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[False for _ in range(max_node + 1\t\t)] for _ in range(max_node + 1\t\t)]\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= check_circuit_or_path(snake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t)\r\n\t\t\tif check == 3:\r\n\t\t\t\t\t\tprint(\"\"\"graph is not Eulerian\"\"\"\t\t)\r\n\t\t\t\t\t\tprint(\"\"\"no path\"\"\"\t\t)\r\n\t\t\t\t\t\treturn\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 1\r\n\t\t\tif check == 2:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= odd_node\r\n\t\t\t\t\t\tprint(\"\"\"graph has a Euler path\"\"\"\t\t)\r\n\t\t\tif check == 1:\r\n\t\t\t\t\t\tprint(\"\"\"graph has a Euler cycle\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= dfs(snake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t)\r\n\t\t\tprint(snake_case_\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Any:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {1: [2, 3], 2: [1, 3], 3: [1, 2]}\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {\r\n\t\t\t 1: [],\r\n\t\t\t 2: []\r\n\t\t\t # all degree is zero\r\n\t\t\t}\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= 1_0\r\n\t\t\tcheck_euler(snake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t)\r\n\t\t\tcheck_euler(snake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t)\r\n\t\t\tcheck_euler(snake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t)\r\n\t\t\tcheck_euler(snake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t)\r\n\t\t\tcheck_euler(snake_case_\t\t\t\t,\t\t\t\t\tsnake_case_\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":354,"string":"354"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom .imports import is_tqdm_available\r\n\r\n\r\nif is_tqdm_available():\r\n\t\t\t\tfrom tqdm.auto import tqdm as _tqdm\r\n\r\nfrom ..state import PartialState\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: bool = True\t\t\t\t,\t\t\t\t\t*lowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\t**lowercase__\t\t: str\t\t) -> Optional[Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not is_tqdm_available():\r\n\t\t\t\t\t\traise ImportError(\"\"\"Accelerate's `tqdm` module requires `tqdm` to be installed. 
Please run `pip install tqdm`.\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= False\r\n\t\t\tif main_process_only:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= PartialState().local_process_index != 0\r\n\t\t\treturn _tqdm(*lowercase__\t\t\t\t,\t\t\t\t\t**lowercase__\t\t\t\t,\t\t\t\t\tdisable=lowercase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":687,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport unittest\r\n\r\nfrom transformers import AutoTokenizer, NystromformerConfig, is_torch_available\r\nfrom transformers.testing_utils import require_torch, slow, torch_device\r\n\r\nfrom ...test_configuration_common import ConfigTester\r\nfrom ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask\r\nfrom ...test_pipeline_mixin import PipelineTesterMixin\r\n\r\n\r\nif is_torch_available():\r\n\t\t\t\timport torch\r\n\r\n\t\t\t\tfrom transformers import (\r\n\t\t\t\t NystromformerForMaskedLM,\r\n\t\t\t\t NystromformerForMultipleChoice,\r\n\t\t\t\t NystromformerForQuestionAnswering,\r\n\t\t\t\t NystromformerForSequenceClassification,\r\n\t\t\t\t NystromformerForTokenClassification,\r\n\t\t\t\t NystromformerModel,\r\n\t\t\t\t)\r\n\t\t\t\tfrom transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A=13\t, __A=7\t, __A=True\t, __A=True\t, __A=True\t, __A=True\t, __A=99\t, __A=32\t, __A=5\t, __A=4\t, __A=37\t, __A=\"gelu\"\t, __A=0.1\t, __A=0.1\t, __A=512\t, __A=16\t, __A=2\t, __A=0.0_2\t, __A=3\t, __A=4\t, __A=None\t, ) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= parent\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= batch_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= is_training\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= use_input_mask\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= use_token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= use_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= vocab_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= hidden_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= num_hidden_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= num_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= intermediate_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= hidden_act\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= hidden_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= attention_probs_dropout_prob\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= max_position_embeddings\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= type_vocab_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= type_sequence_label_size\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= initializer_range\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= num_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= num_choices\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scope\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t,
self.vocab_size\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= None\r\n\t\t\t\t\t\tif self.use_input_mask:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= random_attention_mask([self.batch_size, self.seq_length]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= None\r\n\t\t\t\t\t\tif self.use_token_type_ids:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.type_vocab_size\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= None\r\n\t\t\t\t\t\tif self.use_labels:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= ids_tensor([self.batch_size]\t, self.type_sequence_label_size\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t, self.num_labels\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= ids_tensor([self.batch_size]\t, self.num_choices\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_config()\r\n\r\n\t\t\t\t\t\treturn config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn NystromformerConfig(\r\n\t\t\t\t\t\t vocab_size=self.vocab_size\t, hidden_size=self.hidden_size\t, num_hidden_layers=self.num_hidden_layers\t, num_attention_heads=self.num_attention_heads\t, intermediate_size=self.intermediate_size\t, hidden_act=self.hidden_act\t, hidden_dropout_prob=self.hidden_dropout_prob\t, attention_probs_dropout_prob=self.attention_probs_dropout_prob\t, max_position_embeddings=self.max_position_embeddings\t, type_vocab_size=self.type_vocab_size\t, is_decoder=__A\t, initializer_range=self.initializer_range\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= NystromformerModel(config=__A\t)\r\n\t\t\t\t\t\tmodel.to(__A\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= model(__A\t, attention_mask=__A\t, token_type_ids=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(__A\t, token_type_ids=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= model(__A\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.last_hidden_state.shape\t, (self.batch_size, self.seq_length, self.hidden_size)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= NystromformerForMaskedLM(config=__A\t)\r\n\t\t\t\t\t\tmodel.to(__A\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= model(__A\t, attention_mask=__A\t, token_type_ids=__A\t, labels=__A\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.logits.shape\t, (self.batch_size, self.seq_length, self.vocab_size)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= NystromformerForQuestionAnswering(config=__A\t)\r\n\t\t\t\t\t\tmodel.to(__A\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= model(\r\n\t\t\t\t\t\t __A\t, attention_mask=__A\t, 
token_type_ids=__A\t, start_positions=__A\t, end_positions=__A\t, )\r\n\t\t\t\t\t\tself.parent.assertEqual(result.start_logits.shape\t, (self.batch_size, self.seq_length)\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.end_logits.shape\t, (self.batch_size, self.seq_length)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.num_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= NystromformerForSequenceClassification(__A\t)\r\n\t\t\t\t\t\tmodel.to(__A\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= model(__A\t, attention_mask=__A\t, token_type_ids=__A\t, labels=__A\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.logits.shape\t, (self.batch_size, self.num_labels)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.num_labels\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= NystromformerForTokenClassification(config=__A\t)\r\n\t\t\t\t\t\tmodel.to(__A\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= model(__A\t, attention_mask=__A\t, token_type_ids=__A\t, labels=__A\t)\r\n\t\t\t\t\t\tself.parent.assertEqual(result.logits.shape\t, (self.batch_size, self.seq_length, self.num_labels)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.num_choices\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= NystromformerForMultipleChoice(config=__A\t)\r\n\t\t\t\t\t\tmodel.to(__A\t)\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= input_ids.unsqueeze(1\t).expand(-1\t, self.num_choices\t, -1\t).contiguous()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= token_type_ids.unsqueeze(1\t).expand(-1\t, self.num_choices\t, -1\t).contiguous()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= input_mask.unsqueeze(1\t).expand(-1\t, self.num_choices\t, -1\t).contiguous()\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= model(\r\n\t\t\t\t\t\t __A\t, attention_mask=__A\t, token_type_ids=__A\t, labels=__A\t, )\r\n\t\t\t\t\t\tself.parent.assertEqual(result.logits.shape\t, (self.batch_size, self.num_choices)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.prepare_config_and_inputs()\r\n\t\t\t\t\t\t(\r\n\t\t\t\t\t\t lowerCAmelCase_\t\t\t\t\t\t\r\n\t\t\t\t\t\t) :Optional[int] \t\t\t\t\t= config_and_inputs\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {\"\"\"input_ids\"\"\": input_ids, \"\"\"token_type_ids\"\"\": token_type_ids, \"\"\"attention_mask\"\"\": input_mask}\r\n\t\t\t\t\t\treturn config, inputs_dict\r\n\r\n\r\n\r\n\r\n@require_torch\r\nclass _SCREAMING_SNAKE_CASE ( __UpperCamelCase ,\t\t\t__UpperCamelCase ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t (\r\n\t\t\t NystromformerModel,\r\n\t\t\t NystromformerForMaskedLM,\r\n\t\t\t NystromformerForMultipleChoice,\r\n\t\t\t NystromformerForQuestionAnswering,\r\n\t\t\t NystromformerForSequenceClassification,\r\n\t\t\t NystromformerForTokenClassification,\r\n\t\t\t )\r\n\t\t\t if is_torch_available()\r\n\t\t\t 
else ()\r\n\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Dict \t\t\t=\t\t\t\t\t\t(\r\n\t\t\t {\r\n\t\t\t \"\"\"feature-extraction\"\"\": NystromformerModel,\r\n\t\t\t \"\"\"fill-mask\"\"\": NystromformerForMaskedLM,\r\n\t\t\t \"\"\"question-answering\"\"\": NystromformerForQuestionAnswering,\r\n\t\t\t \"\"\"text-classification\"\"\": NystromformerForSequenceClassification,\r\n\t\t\t \"\"\"token-classification\"\"\": NystromformerForTokenClassification,\r\n\t\t\t \"\"\"zero-shot\"\"\": NystromformerForSequenceClassification,\r\n\t\t\t }\r\n\t\t\t if is_torch_available()\r\n\t\t\t else {}\r\n\t\t\t)\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tFalse\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= NystromformerModelTester(self\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= ConfigTester(self\t, config_class=__A\t, hidden_size=37\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tself.config_tester.run_common_tests()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_model(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tfor type in [\"absolute\", \"relative_key\", \"relative_key_query\"]:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= type\r\n\t\t\t\t\t\t\t\t\tself.model_tester.create_and_check_model(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_for_masked_lm(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_for_multiple_choice(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_for_question_answering(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_for_sequence_classification(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.model_tester.prepare_config_and_inputs()\r\n\t\t\t\t\t\tself.model_tester.create_and_check_for_token_classification(*__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tfor model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 
NystromformerModel.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertIsNotNone(__A\t)\r\n\r\n\r\n\r\n\r\n@require_torch\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= NystromformerModel.from_pretrained(\"\"\"uw-madison/nystromformer-512\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.tensor([[0, 1, 2, 3, 4, 5]]\t)\r\n\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= model(__A\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.Size((1, 6, 768)\t)\r\n\t\t\t\t\t\tself.assertEqual(output.shape\t, __A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.tensor(\r\n\t\t\t\t\t\t [[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]]\t)\r\n\r\n\t\t\t\t\t\tself.assertTrue(torch.allclose(output[:, :3, :3]\t, __A\t, atol=1E-4\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"the [MASK] of Belgium is Brussels\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoTokenizer.from_pretrained(\"\"\"uw-madison/nystromformer-512\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= NystromformerForMaskedLM.from_pretrained(\"\"\"uw-madison/nystromformer-512\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= tokenizer(__A\t, return_tensors=\"\"\"pt\"\"\"\t)\r\n\r\n\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= model(encoding.input_ids\t).logits\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= token_logits[:, 2, :].argmax(-1\t)[0]\r\n\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, \"\"\"capital\"\"\"\t)\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":355,"string":"355"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport importlib\r\nimport json\r\nimport os\r\nimport sys\r\nimport tempfile\r\nimport unittest\r\nfrom pathlib import Path\r\n\r\nimport transformers\r\nimport transformers.models.auto\r\nfrom transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig\r\nfrom transformers.models.bert.configuration_bert import BertConfig\r\nfrom transformers.models.roberta.configuration_roberta import RobertaConfig\r\nfrom transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir\r\n\r\n\r\nsys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))\r\n\r\nfrom test_module.custom_configuration import CustomConfig # noqa E402\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= get_tests_dir('fixtures/dummy-config.json')\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself.assertIsNotNone(transformers.models.auto.__spec__\t)\r\n\t\t\t\t\t\tself.assertIsNotNone(importlib.util.find_spec(\"\"\"transformers.models.auto\"\"\"\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) 
->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"bert-base-uncased\"\"\"\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= AutoConfig.for_model(\"\"\"roberta\"\"\"\t)\r\n\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\t# This model name contains bert and roberta, but roberta ends up being picked.\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(__A\t, \"\"\"fake-roberta\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tos.makedirs(__A\t, exist_ok=__A\t)\r\n\t\t\t\t\t\t\t\t\twith open(os.path.join(__A\t, \"\"\"config.json\"\"\"\t)\t, \"\"\"w\"\"\"\t) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\tf.write(json.dumps({}\t)\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(type(__A\t)\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"custom\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# Wrong model type will raise an error\r\n\t\t\t\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"model\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# Trying to register something existing in the Transformers library will raise an error\r\n\t\t\t\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"bert\"\"\"\t, __A\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Now that the config is registered, it can be used as any other config with the auto-API\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CustomConfig()\r\n\t\t\t\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\t\t\t\tconfig.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoConfig.from_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertIsInstance(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tif \"custom\" in CONFIG_MAPPING._extra_content:\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel CONFIG_MAPPING._extra_content[\"custom\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, \"\"\"bert-base is not a local folder and is not a valid model identifier\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"bert-base\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, r\"\"\"aaaaaa is not a valid git identifier \\(branch name, tag name or commit id\\)\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= 
AutoConfig.from_pretrained(__A\t, revision=\"\"\"aaaaaa\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\twith self.assertRaisesRegex(\r\n\t\t\t\t\t\t __A\t, \"\"\"hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.\"\"\"\t, ):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/no-config-test-repo\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# If remote code is not set, we will time out when asking whether to load the model.\r\n\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t)\r\n\t\t\t\t\t\t# If remote code is disabled, we can't load this config.\r\n\t\t\t\t\t\twith self.assertRaises(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Test config can be reloaded.\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp_dir:\r\n\t\t\t\t\t\t\t\t\tconfig.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= AutoConfig.from_pretrained(__A\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\tself.assertEqual(reloaded_config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\t\"new-model\"\r\n\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tAutoConfig.register(\"\"\"new-model\"\"\"\t, __A\t)\r\n\t\t\t\t\t\t\t\t\t# If remote code is not set, the default is to use local\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfigLocal\"\"\"\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# If remote code is disabled, we load the local one.\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfigLocal\"\"\"\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# If remote is enabled, we load from the Hub\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= AutoConfig.from_pretrained(\"\"\"hf-internal-testing/test_dynamic_model\"\"\"\t, trust_remote_code=__A\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(config.__class__.__name__\t, \"\"\"NewModelConfig\"\"\"\t)\r\n\r\n\t\t\t\t\t\tfinally:\r\n\t\t\t\t\t\t\t\t\tif \"new-model\" in CONFIG_MAPPING._extra_content:\r\n\t\t\t\t\t\t\t\t\t\t\t\tdel CONFIG_MAPPING._extra_content[\"new-model\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":688,"cells":{"code":{"kind":"string","value":"\r\nimport inspect\r\nimport logging\r\nimport 
os\r\nimport random\r\nimport shutil\r\nimport tempfile\r\nimport unittest\r\n\r\nimport pytest\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.utils.data import DataLoader, TensorDataset\r\n\r\nfrom accelerate import Accelerator\r\nfrom accelerate.test_utils import execute_subprocess_async, require_cuda\r\nfrom accelerate.utils import ProjectConfiguration, set_seed\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.getLogger(__name__)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]=2\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple=3\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Union[str, Any]=1_6\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 1_0\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 2\t\t) -> Union[str, Any]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef get_dataset(lowercase__\t\t: List[str]\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= torch.randn(batch_size * n_batches\t\t\t\t,\t\t\t\t\t1\t\t)\r\n\t\t\t\t\t\treturn TensorDataset(A__\t\t\t\t,\t\t\t\t\ta * x + b + 0.1 * torch.randn(batch_size * n_batches\t\t\t\t,\t\t\t\t\t1\t\t)\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= get_dataset(A__\t\t)\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= get_dataset(A__\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DataLoader(A__\t\t\t\t,\t\t\t\t\tshuffle=A__\t\t\t\t,\t\t\t\t\tbatch_size=A__\t\t\t\t,\t\t\t\t\tnum_workers=4\t\t)\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= DataLoader(A__\t\t\t\t,\t\t\t\t\tshuffle=A__\t\t\t\t,\t\t\t\t\tbatch_size=A__\t\t\t\t,\t\t\t\t\tnum_workers=4\t\t)\r\n\t\t\treturn (train_dataloader, valid_dataloader)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]=None\t\t) -> Optional[int]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= []\r\n\t\t\tfor epoch in range(A__\t\t):\r\n\t\t\t\t\t\t# Train quickly\r\n\t\t\t\t\t\tmodel.train()\r\n\t\t\t\t\t\tfor batch in dataloader:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[str] \t\t\t\t\t= batch\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(A__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= torch.nn.functional.mse_loss(A__\t\t\t\t,\t\t\t\t\tA__\t\t)\r\n\t\t\t\t\t\t\t\t\taccelerator.backward(A__\t\t)\r\n\t\t\t\t\t\t\t\t\toptimizer.step()\r\n\t\t\t\t\t\t\t\t\toptimizer.zero_grad()\r\n\t\t\t\t\t\trands.append(random.random()\t\t) # Introduce some randomness\r\n\t\t\t\t\t\tif scheduler is not None:\r\n\t\t\t\t\t\t\t\t\tscheduler.step()\r\n\t\t\treturn rands\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( nn.Module\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tsuper().__init__()\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= nn.Parameter(torch.randn(1\t)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= nn.Parameter(torch.randn(1\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn x * self.a + self.b\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as 
tmpdir:\r\n\t\t\t\t\t\t\t\t\tset_seed(42\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= DummyModel()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.optim.Adam(params=model.parameters()\t, lr=1E-3\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= dummy_dataloaders()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= ProjectConfiguration(total_limit=1\t, project_dir=UpperCamelCase_\t, automatic_checkpoint_naming=UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t# Train baseline\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= Accelerator(project_config=UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :str \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t\t\t\t\t\t\t UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t# Save initial\r\n\t\t\t\t\t\t\t\t\taccelerator.save_state()\r\n\r\n\t\t\t\t\t\t\t\t\t# Save second state\r\n\t\t\t\t\t\t\t\t\taccelerator.save_state()\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(len(os.listdir(accelerator.project_dir\t)\t)\t, 1\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdir:\r\n\t\t\t\t\t\t\t\t\tset_seed(42\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= DummyModel()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= torch.optim.Adam(params=model.parameters()\t, lr=1E-3\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= dummy_dataloaders()\r\n\t\t\t\t\t\t\t\t\t# Train baseline\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Accelerator()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t\t\t\t\t\t\t UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t# Save initial\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= os.path.join(UpperCamelCase_\t, \"\"\"initial\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\taccelerator.save_state(UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t((lowerCAmelCase_)\t\t\t\t\t,\t(lowerCAmelCase_)) :Optional[Any] \t\t\t\t\t= model.a.item(), model.b.item()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= optimizer.state_dict()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= train(3\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t((lowerCAmelCase_)\t\t\t\t\t,\t(lowerCAmelCase_)) :List[str] \t\t\t\t\t= model.a.item(), model.b.item()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= optimizer.state_dict()\r\n\r\n\t\t\t\t\t\t\t\t\t# Train partially\r\n\t\t\t\t\t\t\t\t\tset_seed(42\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= DummyModel()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.optim.Adam(params=model.parameters()\t, lr=1E-3\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :int \t\t\t\t\t= dummy_dataloaders()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= Accelerator()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t\t\t\t\t\t\t UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, 
UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\taccelerator.load_state(UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t((lowerCAmelCase_)\t\t\t\t\t,\t(lowerCAmelCase_)) :Dict \t\t\t\t\t= model.a.item(), model.b.item()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= optimizer.state_dict()\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= train(2\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t# Save everything\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= os.path.join(UpperCamelCase_\t, \"\"\"checkpoint\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\taccelerator.save_state(UpperCamelCase_\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Load everything back in and make sure all states work\r\n\t\t\t\t\t\t\t\t\taccelerator.load_state(UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\ttest_rands += train(1\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t((lowerCAmelCase_)\t\t\t\t\t,\t(lowerCAmelCase_)) :Dict \t\t\t\t\t= model.a.item(), model.b.item()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= optimizer.state_dict()\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdir:\r\n\t\t\t\t\t\t\t\t\tset_seed(42\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= DummyModel()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= torch.optim.Adam(params=model.parameters()\t, lr=1E-3\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :int \t\t\t\t\t= dummy_dataloaders()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Train baseline\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= Accelerator(project_dir=UpperCamelCase_\t, project_config=UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :int \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t\t\t\t\t\t\t UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t# Save initial\r\n\t\t\t\t\t\t\t\t\taccelerator.save_state()\r\n\t\t\t\t\t\t\t\t\t((lowerCAmelCase_)\t\t\t\t\t,\t(lowerCAmelCase_)) :Tuple \t\t\t\t\t= model.a.item(), model.b.item()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= optimizer.state_dict()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= train(3\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t((lowerCAmelCase_)\t\t\t\t\t,\t(lowerCAmelCase_)) :int \t\t\t\t\t= model.a.item(), model.b.item()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= optimizer.state_dict()\r\n\r\n\t\t\t\t\t\t\t\t\t# Train partially\r\n\t\t\t\t\t\t\t\t\tset_seed(42\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= DummyModel()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, 
Any] \t\t\t\t\t= torch.optim.Adam(params=model.parameters()\t, lr=1E-3\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= dummy_dataloaders()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= ProjectConfiguration(iteration=1\t, automatic_checkpoint_naming=UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= Accelerator(project_dir=UpperCamelCase_\t, project_config=UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :int \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t\t\t\t\t\t\t UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\taccelerator.load_state(os.path.join(UpperCamelCase_\t, \"\"\"checkpoints\"\"\"\t, \"\"\"checkpoint_0\"\"\"\t)\t)\r\n\t\t\t\t\t\t\t\t\t((lowerCAmelCase_)\t\t\t\t\t,\t(lowerCAmelCase_)) :Any \t\t\t\t\t= model.a.item(), model.b.item()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= optimizer.state_dict()\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= train(2\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t# Save everything\r\n\t\t\t\t\t\t\t\t\taccelerator.save_state()\r\n\r\n\t\t\t\t\t\t\t\t\t# Load everything back in and make sure all states work\r\n\t\t\t\t\t\t\t\t\taccelerator.load_state(os.path.join(UpperCamelCase_\t, \"\"\"checkpoints\"\"\"\t, \"\"\"checkpoint_1\"\"\"\t)\t)\r\n\t\t\t\t\t\t\t\t\ttest_rands += train(1\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t((lowerCAmelCase_)\t\t\t\t\t,\t(lowerCAmelCase_)) :Any \t\t\t\t\t= model.a.item(), model.b.item()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= optimizer.state_dict()\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, UpperCamelCase_\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= torch.tensor([1, 2, 3]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.tensor([2, 3, 4]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DummyModel()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= torch.optim.Adam(net.parameters()\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= Accelerator()\r\n\t\t\t\t\t\twith self.assertRaises(UpperCamelCase_\t) as ve:\r\n\t\t\t\t\t\t\t\t\taccelerator.register_for_checkpointing(UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= str(ve.exception\t)\r\n\t\t\t\t\t\tself.assertTrue(\"\"\"Item at index 0\"\"\" in message\t)\r\n\t\t\t\t\t\tself.assertTrue(\"\"\"Item at index 1\"\"\" in message\t)\r\n\t\t\t\t\t\tself.assertFalse(\"\"\"Item at index 2\"\"\" in message\t)\r\n\t\t\t\t\t\tself.assertFalse(\"\"\"Item at index 3\"\"\" in message\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as 
tmpdir:\r\n\t\t\t\t\t\t\t\t\tset_seed(42\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= DummyModel()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= torch.optim.Adam(params=model.parameters()\t, lr=1E-3\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= torch.optim.lr_scheduler.StepLR(UpperCamelCase_\t, step_size=1\t, gamma=0.9_9\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Dict \t\t\t\t\t= dummy_dataloaders()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t# Train baseline\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= Accelerator(project_dir=UpperCamelCase_\t, project_config=UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t\t\t\t\t\t\t UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t# Save initial\r\n\t\t\t\t\t\t\t\t\taccelerator.save_state()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scheduler.state_dict()\r\n\t\t\t\t\t\t\t\t\ttrain(3\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t, UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tself.assertNotEqual(UpperCamelCase_\t, scheduler.state_dict()\t)\r\n\r\n\t\t\t\t\t\t\t\t\t# Load everything back in and make sure all states work\r\n\t\t\t\t\t\t\t\t\taccelerator.load_state(os.path.join(UpperCamelCase_\t, \"\"\"checkpoints\"\"\"\t, \"\"\"checkpoint_0\"\"\"\t)\t)\r\n\t\t\t\t\t\t\t\t\tself.assertEqual(UpperCamelCase_\t, scheduler.state_dict()\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdir:\r\n\t\t\t\t\t\t\t\t\tset_seed(42\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= DummyModel()\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_\t, total_limit=2\t)\r\n\t\t\t\t\t\t\t\t\t# Train baseline\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= Accelerator(project_dir=UpperCamelCase_\t, project_config=UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= accelerator.prepare(UpperCamelCase_\t)\r\n\t\t\t\t\t\t\t\t\t# Save 11 states; total_limit=2 should keep only the last two:\r\n\t\t\t\t\t\t\t\t\tfor _ in range(11\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\taccelerator.save_state()\r\n\t\t\t\t\t\t\t\t\tself.assertTrue(not os.path.exists(os.path.join(UpperCamelCase_\t, \"\"\"checkpoints\"\"\"\t, \"\"\"checkpoint_0\"\"\"\t)\t)\t)\r\n\t\t\t\t\t\t\t\t\tself.assertTrue(os.path.exists(os.path.join(UpperCamelCase_\t, \"\"\"checkpoints\"\"\"\t, \"\"\"checkpoint_9\"\"\"\t)\t)\t)\r\n\t\t\t\t\t\t\t\t\tself.assertTrue(os.path.exists(os.path.join(UpperCamelCase_\t, \"\"\"checkpoints\"\"\"\t, \"\"\"checkpoint_10\"\"\"\t)\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@require_cuda\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [\"\"\"torchrun\"\"\", f\"\"\"--nproc_per_node={torch.cuda.device_count()}\"\"\", inspect.getfile(self.__class__\t)]\r\n\t\t\t\t\t\texecute_subprocess_async(UpperCamelCase_\t, env=os.environ.copy()\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= '/tmp/accelerate/state_checkpointing'\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 
DummyModel()\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= torch.optim.Adam(params=model.parameters(), lr=1e-3)\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)\r\n\t\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= dummy_dataloaders()\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ProjectConfiguration(automatic_checkpoint_naming=True)\r\n\t\t\t\t# Train baseline\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')\r\n\t\t\t\tif accelerator.process_index == 0:\r\n\t\t\t\t\t\t\t\tif os.path.exists(savedir):\r\n\t\t\t\t\t\t\t\t\t\t\t\tshutil.rmtree(savedir)\r\n\t\t\t\t\t\t\t\tos.makedirs(savedir)\r\n\t\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= accelerator.prepare(\r\n\t\t\t\t model, optimizer, train_dataloader, valid_dataloader, scheduler\r\n\t\t\t\t)\r\n\t\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= accelerator.prepare(model, optimizer)\r\n\t\t\t\ttrain(3, model, train_dataloader, optimizer, accelerator, scheduler)\r\n\t\t\t\t# Check that the initial optimizer is loaded on the GPU\r\n\t\t\t\tfor group in optimizer.param_groups:\r\n\t\t\t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= group['params'][0].device\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\tassert param_device.type == accelerator.device.type\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= model.cpu()\r\n\t\t\t\taccelerator.wait_for_everyone()\r\n\t\t\t\taccelerator.save_state()\r\n\t\t\t\taccelerator.wait_for_everyone()\r\n\r\n\t\t\t\t# Check CPU state\r\n\t\t\t\taccelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')\r\n\t\t\t\tfor group in optimizer.param_groups:\r\n\t\t\t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= group['params'][0].device\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\tassert (\r\n\t\t\t\t param_device.type == torch.device('cpu').type\r\n\t\t\t\t), F\"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}\"\r\n\r\n\t\t\t\t# Check device state\r\n\t\t\t\tmodel.to(accelerator.device)\r\n\t\t\t\taccelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')\r\n\t\t\t\tfor group in optimizer.param_groups:\r\n\t\t\t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= group['params'][0].device\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\tassert (\r\n\t\t\t\t param_device.type == accelerator.device.type\r\n\t\t\t\t), F\"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}\"\r\n\r\n\t\t\t\t# Check error\r\n\t\t\t\twith pytest.raises(TypeError, match='Unsupported optimizer map location passed'):\r\n\t\t\t\t\t\t\t\taccelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')\r\n\t\t\t\taccelerator.wait_for_everyone()\r\n\t\t\t\tif accelerator.process_index == 0:\r\n\t\t\t\t\t\t\t\tshutil.rmtree(savedir)\r\n\t\t\t\taccelerator.wait_for_everyone()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":356,"string":"356"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nimport unittest\r\n\r\nfrom transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (\r\n    VOCAB_FILES_NAMES,\r\n    GPTSanJapaneseTokenizer,\r\n)\r\nfrom transformers.testing_utils import require_tokenizers, 
slow\r\n\r\nfrom ...test_tokenization_common import TokenizerTesterMixin\r\n\r\n\r\n\r\n\r\n@require_tokenizers\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tGPTSanJapaneseTokenizer\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\tFalse\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t{\"do_clean_text\": False, \"add_prefix_space\": False}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tsuper().setUp()\r\n\r\n\t\t\t\t\t\t# fmt: off\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [\"\"\"こん\"\"\", \"\"\"こんに\"\"\", \"\"\"にちは\"\"\", \"\"\"ばんは\"\"\", \"\"\"世界,㔺界\"\"\", \"\"\"、\"\"\", \"\"\"。\"\"\", \"\"\"<BR>\"\"\", \"\"\"<SP>\"\"\", \"\"\"<TAB>\"\"\", \"\"\"<URL>\"\"\", \"\"\"<EMAIL>\"\"\", \"\"\"<TEL>\"\"\", \"\"\"<DATE>\"\"\", \"\"\"<PRICE>\"\"\", \"\"\"<BLOCK>\"\"\", \"\"\"<KIGOU>\"\"\", \"\"\"<U2000U2BFF>\"\"\", \"\"\"<|emoji1|>\"\"\", \"\"\"<unk>\"\"\", \"\"\"<|bagoftoken|>\"\"\", \"\"\"<|endoftext|>\"\"\"]
\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\"\"\"emoji\"\"\": {\"\"\"\\ud83d\\ude00\"\"\": \"\"\"<|emoji1|>\"\"\"}, \"\"\"emoji_inv\"\"\": {\"\"\"<|emoji1|>\"\"\": \"\"\"\\ud83d\\ude00\"\"\"}}  # 😀\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= {\"\"\"unk_token\"\"\": \"\"\"<unk>\"\"\"}\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= os.path.join(self.tmpdirname\t, VOCAB_FILES_NAMES[\"\"\"emoji_file\"\"\"]\t)\r\n\t\t\t\t\t\twith open(self.vocab_file\t, \"\"\"w\"\"\"\t, encoding=\"\"\"utf-8\"\"\"\t) as vocab_writer:\r\n\t\t\t\t\t\t\t\t\tvocab_writer.write(\"\"\"\"\"\".join([x + \"\"\"\\n\"\"\" for x in vocab_tokens]\t)\t)\r\n\t\t\t\t\t\twith open(self.emoji_file\t, \"\"\"w\"\"\"\t) as emoji_writer:\r\n\t\t\t\t\t\t\t\t\temoji_writer.write(json.dumps(__A\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tkwargs.update(self.special_tokens_map\t)\r\n\t\t\t\t\t\treturn GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。 \\nこんばんは、㔺界。😀\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんにちは、世界。 \\nこんばんは、世界。😀\"\"\"\r\n\t\t\t\t\t\treturn input_text, output_text\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_input_output_texts(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(__A\t, add_special_tokens=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.decode(__A\t, clean_up_tokenization_spaces=__A\t)\r\n\t\t\t\t\t\treturn text, ids\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tpass  # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tpass  # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tpass  # TODO add if relevant\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんにちは、世界。　こんばんは、㔺界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\"\"\"こん\"\"\", \"\"\"にちは\"\"\", \"\"\"、\"\"\", \"\"\"世界\"\"\", \"\"\"。\"\"\", \"\"\"<SP>\"\"\", \"\"\"こん\"\"\", \"\"\"ばんは\"\"\", \"\"\"、\"\"\", \"\"\"㔺界\"\"\", \"\"\"。\"\"\"]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.tokenize(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids without special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [0, 
2, 5, 4, 6, 8, 0, 3, 5, 4, 6]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\t\t\t\t\t\t# Testing conversion to ids with special tokens\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokens + [tokenizer.unk_token]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= tokenizer.convert_tokens_to_ids(__A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"こんにちは、、、、世界。こんばんは、、、、世界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokenizer.encode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"こんばんは、㔺界。😀\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。こんばんは、世界。😀\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer.encode(prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(__A\t, prefix_text=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= tokenizer.decode(__A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\t# Testing tokenization\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"こんにちは、世界。\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= \"\"\"こんばんは、㔺界。😀\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= len(tokenizer.encode(__A\t)\t) - 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [1] + [0] * (len_prefix + len_text + 1)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [1] * (len_prefix + len_text + 1) + [0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= [1] + [1] * (len_prefix) + [0] * (len_text + 1)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(prefix_text + 
input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(\"\"\"\"\"\"\t, prefix_text=prefix_text + input_text\t).token_type_ids\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokenizer(__A\t, prefix_text=__A\t).token_type_ids\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"あンいワ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer.encode(\"\"\"\"\"\"\t, prefix_text=\"\"\"あンいワ\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= tokenizer.encode(\"\"\"いワ\"\"\"\t, prefix_text=\"\"\"あン\"\"\"\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertEqual(tokenizer.decode(__A\t)\t, tokenizer.decode(__A\t)\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertNotEqual(__A\t, __A\t)\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[-1]\t)  # SEG token\r\n\t\t\t\t\t\tself.assertEqual(x_token_a[1]\t, x_token_a[3]\t)  # SEG token\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@slow\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.tokenizer_class.from_pretrained(\"\"\"Tanrei/GPTSAN-japanese\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[\"\"\"武田信玄\"\"\", \"\"\"は、\"\"\"], [\"\"\"織田信長\"\"\", \"\"\"の配下の、\"\"\"]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenizer(__A\t, padding=__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= tokenizer.batch_encode_plus(__A\t, padding=__A\t)\r\n\r\n\t\t\t\t\t\t# fmt: off\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]\r\n\t\t\t\t\t\t# fmt: on\r\n\t\t\t\t\t\tself.assertListEqual(x_token.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token.attention_mask\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.input_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.token_type_ids\t, __A\t)\r\n\t\t\t\t\t\tself.assertListEqual(x_token_a.attention_mask\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t# Intentionally convert some words to accommodate character fluctuations unique to Japanese\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\t# tokenizer has no padding token\r\n\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":689,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( 
lowercase__\t\t: Optional[Any] = 4_0_0_0_0_0_0\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= []\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Dict \t\t\t\t\t= 0, 1\r\n\t\t\twhile b <= n:\r\n\t\t\t\t\t\tif b % 2 == 0:\r\n\t\t\t\t\t\t\t\t\teven_fibs.append(lowercase__\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= b, a + b\r\n\t\t\treturn sum(lowercase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":357,"string":"357"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Fitting Polynomial Regression to the dataset\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\n\r\n# Importing the dataset\r\n__UpperCAmelCase\t\t\t\t\t\t\t= pd.read_csv(\r\n 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'\r\n 'position_salaries.csv'\r\n)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= dataset.iloc[:, 1:2].values\r\n__UpperCAmelCase\t\t\t\t\t\t\t= dataset.iloc[:, 2].values\r\n\r\n\r\n__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= train_test_split(X, y, test_size=0.2, random_state=0)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= PolynomialFeatures(degree=4)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= poly_reg.fit_transform(X)\r\n__UpperCAmelCase\t\t\t\t\t\t\t= LinearRegression()\r\npol_reg.fit(X_poly, y)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tplt.scatter(lowercase__\t\t\t\t,\t\t\t\t\tlowercase__\t\t\t\t,\t\t\t\t\tcolor=\"\"\"red\"\"\"\t\t)\r\n\t\t\tplt.plot(lowercase__\t\t\t\t,\t\t\t\t\tpol_reg.predict(poly_reg.fit_transform(lowercase__\t\t)\t\t)\t\t\t\t,\t\t\t\t\tcolor=\"\"\"blue\"\"\"\t\t)\r\n\t\t\tplt.title(\"\"\"Truth or Bluff (Linear Regression)\"\"\"\t\t)\r\n\t\t\tplt.xlabel(\"\"\"Position level\"\"\"\t\t)\r\n\t\t\tplt.ylabel(\"\"\"Salary\"\"\"\t\t)\r\n\t\t\tplt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tviz_polymonial()\r\n\r\n\t\t\t\t# Predicting a new result with Polymonial Regression\r\n\t\t\t\tpol_reg.predict(poly_reg.fit_transform([[5.5]]))\r\n\t\t\t\t# output should be 132148.43750003\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":690,"cells":{"code":{"kind":"string","value":"\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str\t\t\t\t,\t\t\t\t\tlowercase__\t\t: bool = False\t\t) -> str:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif not isinstance(_UpperCamelCase\t\t\t\t,\t\t\t\t\t_UpperCamelCase\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= f\"\"\"Expected string as input, found {type(_UpperCamelCase\t\t)}\"\"\"\r\n\t\t\t\t\t\traise ValueError(_UpperCamelCase\t\t)\r\n\t\t\tif not isinstance(_UpperCamelCase\t\t\t\t,\t\t\t\t\t_UpperCamelCase\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= f\"\"\"Expected boolean as use_pascal parameter, found {type(_UpperCamelCase\t\t)}\"\"\"\r\n\t\t\t\t\t\traise 
ValueError(_UpperCamelCase\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= input_str.split(\"\"\"_\"\"\"\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= 0 if use_pascal else 1\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= words[start_index:]\r\n\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [word[0].upper() + word[1:] for word in words_to_capitalize]\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= \"\"\"\"\"\" if use_pascal else words[0]\r\n\r\n\t\t\treturn \"\".join([initial_word, *capitalized_words]\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tfrom doctest import testmod\r\n\r\n\t\t\t\ttestmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":358,"string":"358"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 1.6021e-19 # units = C\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\tlowercase__\t\t: float\t\t\t\t,\t\t\t\t\t) -> tuple[str, float]:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif (conductivity, electron_conc, mobility).count(0\t\t) != 1:\r\n\t\t\t\t\t\traise ValueError(\"\"\"You cannot supply more or less than 2 values\"\"\"\t\t)\r\n\t\t\telif conductivity < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Conductivity cannot be negative\"\"\"\t\t)\r\n\t\t\telif electron_conc < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"Electron concentration cannot be negative\"\"\"\t\t)\r\n\t\t\telif mobility < 0:\r\n\t\t\t\t\t\traise ValueError(\"\"\"mobility cannot be negative\"\"\"\t\t)\r\n\t\t\telif conductivity == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"conductivity\",\r\n\t\t\t\t\t\t mobility * electron_conc * ELECTRON_CHARGE,\r\n\t\t\t\t\t\t)\r\n\t\t\telif electron_conc == 0:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"electron_conc\",\r\n\t\t\t\t\t\t conductivity / (mobility * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t \"mobility\",\r\n\t\t\t\t\t\t conductivity / (electron_conc * ELECTRON_CHARGE),\r\n\t\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":691,"cells":{"code":{"kind":"string","value":"\r\nfrom math import factorial\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 2_0\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,\r\n\t\t\t# 2, 3,...\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= n // 2\r\n\r\n\t\t\treturn int(factorial(lowerCAmelCase__\t\t) / (factorial(lowerCAmelCase__\t\t) * factorial(n - k\t\t))\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\tif len(sys.argv) == 1:\r\n\t\t\t\t\t\t\t\tprint(solution(20))\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= int(sys.argv[1])\r\n\t\t\t\t\t\t\t\t\t\t\t\tprint(solution(n))\r\n\t\t\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tprint('Invalid entry - please enter a number.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":359,"string":"359"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple 
docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport warnings\r\n\r\nfrom ...utils import logging\r\nfrom .image_processing_clip import CLIPImageProcessor\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, *__A\t, **__A\t) ->\t\t\t\t\tNone:\r\n\t\t\t\t\t\twarnings.warn(\r\n\t\t\t\t\t\t \"\"\"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please\"\"\"\r\n\t\t\t\t\t\t \"\"\" use CLIPImageProcessor instead.\"\"\"\t, __A\t, )\r\n\t\t\t\t\t\tsuper().__init__(*__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":692,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import (\r\n OptionalDependencyNotAvailable,\r\n _LazyModule,\r\n is_tf_available,\r\n is_torch_available,\r\n is_vision_available,\r\n)\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],\r\n}\r\n\r\ntry:\r\n\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['MobileViTFeatureExtractor']\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= ['MobileViTImageProcessor']\r\n\r\ntry:\r\n\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'MobileViTForImageClassification',\r\n\t\t\t\t 'MobileViTForSemanticSegmentation',\r\n\t\t\t\t 'MobileViTModel',\r\n\t\t\t\t 'MobileViTPreTrainedModel',\r\n\t\t\t\t]\r\n\r\ntry:\r\n\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\t\tpass\r\nelse:\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= [\r\n\t\t\t\t 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',\r\n\t\t\t\t 'TFMobileViTForImageClassification',\r\n\t\t\t\t 'TFMobileViTForSemanticSegmentation',\r\n\t\t\t\t 'TFMobileViTModel',\r\n\t\t\t\t 'TFMobileViTPreTrainedModel',\r\n\t\t\t\t]\r\n\r\nif TYPE_CHECKING:\r\n\t\t\t\tfrom .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_vision_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .feature_extraction_mobilevit import MobileViTFeatureExtractor\r\n\t\t\t\t\t\t\t\tfrom .image_processing_mobilevit import MobileViTImageProcessor\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_mobilevit import (\r\n\t\t\t\t\t\t\t\t MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t MobileViTForImageClassification,\r\n\t\t\t\t\t\t\t\t MobileViTForSemanticSegmentation,\r\n\t\t\t\t\t\t\t\t 
MobileViTModel,\r\n\t\t\t\t\t\t\t\t MobileViTPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tif not is_tf_available():\r\n\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfrom .modeling_tf_mobilevit import (\r\n\t\t\t\t\t\t\t\t TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t\t\t TFMobileViTForImageClassification,\r\n\t\t\t\t\t\t\t\t TFMobileViTForSemanticSegmentation,\r\n\t\t\t\t\t\t\t\t TFMobileViTModel,\r\n\t\t\t\t\t\t\t\t TFMobileViTPreTrainedModel,\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\r\nelse:\r\n\t\t\t\timport sys\r\n\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":360,"string":"360"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom itertools import zip_longest\r\n\r\nimport requests\r\nfrom bsa import BeautifulSoup\r\nfrom pandas import DataFrame\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = \"laptop\"\t\t) -> DataFrame:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= f\"\"\"https://www.amazon.in/laptop/s?k={product}\"\"\"\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t \"\"\"User-Agent\"\"\": \"\"\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36\"\"\",\r\n\t\t\t \"\"\"Accept-Language\"\"\": \"\"\"en-US, en;q=0.5\"\"\",\r\n\t\t\t}\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= BeautifulSoup(requests.get(lowercase__\t\t\t\t,\t\t\t\t\theaders=lowercase__\t\t).text\t\t)\r\n\t\t\t# Initialize a Pandas dataframe with the column titles\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= DataFrame(\r\n\t\t\t columns=[\r\n\t\t\t \"\"\"Product Title\"\"\",\r\n\t\t\t \"\"\"Product Link\"\"\",\r\n\t\t\t \"\"\"Current Price of the product\"\"\",\r\n\t\t\t \"\"\"Product Rating\"\"\",\r\n\t\t\t \"\"\"MRP of the product\"\"\",\r\n\t\t\t \"\"\"Discount\"\"\",\r\n\t\t\t ]\t\t)\r\n\t\t\t# Loop through each entry and store them in the dataframe\r\n\t\t\tfor item, _ in zip_longest(\r\n\t\t\t soup.find_all(\r\n\t\t\t \"\"\"div\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"s-result-item\"\"\", \"\"\"data-component-type\"\"\": \"\"\"s-search-result\"\"\"}\t\t\t\t,\t\t\t\t\t)\t\t\t\t,\t\t\t\t\tsoup.find_all(\"\"\"div\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-row a-size-base a-color-base\"\"\"}\t\t)\t\t\t\t,\t\t\t\t\t):\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= item.ha.text\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= \"\"\"https://www.amazon.in/\"\"\" + item.ha.a[\"\"\"href\"\"\"]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-offscreen\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= item.find(\"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-icon-alt\"\"\"}\t\t).text\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= \"\"\"Not available\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"₹\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t + 
item.find(\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"span\"\"\"\t\t\t\t,\t\t\t\t\tattrs={\"\"\"class\"\"\": \"\"\"a-price a-text-price\"\"\"}\t\t).text.split(\"\"\"₹\"\"\"\t\t)[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= \"\"\"\"\"\"\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= float(\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t (\r\n\t\t\t\t\t\t\t\t\t\t\t\t float(product_mrp.strip(\"\"\"₹\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t - float(product_price.strip(\"\"\"₹\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t )\r\n\t\t\t\t\t\t\t\t\t\t\t\t / float(product_mrp.strip(\"\"\"₹\"\"\"\t\t).replace(\"\"\",\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"\"\"\"\t\t)\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t )\r\n\t\t\t\t\t\t\t\t\t\t\t\t * 1_0_0\t\t)\r\n\t\t\t\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= float(\"\"\"nan\"\"\"\t\t)\r\n\t\t\t\t\t\texcept AttributeError:\r\n\t\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= [\r\n\t\t\t\t\t\t product_title,\r\n\t\t\t\t\t\t product_link,\r\n\t\t\t\t\t\t product_price,\r\n\t\t\t\t\t\t product_rating,\r\n\t\t\t\t\t\t product_mrp,\r\n\t\t\t\t\t\t discount,\r\n\t\t\t\t\t\t]\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= \"\"\" \"\"\"\r\n\t\t\tdata_frame.index += 1\r\n\t\t\treturn data_frame\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 'headphones'\r\n\t\t\t\tget_amazon_product_data(product).to_csv(F\"\"\"Amazon Product Data for {product}.csv\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":693,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom abc import ABC, abstractmethod\r\nfrom argparse import ArgumentParser\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( __snake_case\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@staticmethod\r\n\t\t\t@abstractmethod\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( __A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\traise NotImplementedError()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@abstractmethod\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\traise NotImplementedError()\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":361,"string":"361"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport shutil\r\nimport tempfile\r\nimport unittest\r\n\r\nfrom transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast\r\nfrom transformers.testing_utils import require_sentencepiece, require_torchaudio\r\n\r\nfrom .test_feature_extraction_clap import floats_list\r\n\r\n\r\n\r\n\r\n@require_torchaudio\r\n@require_sentencepiece\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= \"\"\"laion/clap-htsat-unfused\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tempfile.mkdtemp()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, 
**__A\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\treturn RobertaTokenizer.from_pretrained(self.checkpoint\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, **__A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn ClapFeatureExtractor.from_pretrained(self.checkpoint\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tshutil.rmtree(self.tmpdirname\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_tokenizer()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tprocessor.save_pretrained(self.tmpdirname\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ClapProcessor.from_pretrained(self.tmpdirname\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.tokenizer.get_vocab()\t, tokenizer.get_vocab()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.tokenizer\t, __A\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.feature_extractor.to_json_string()\t, feature_extractor.to_json_string()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.feature_extractor\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= ClapProcessor(tokenizer=self.get_tokenizer()\t, feature_extractor=self.get_feature_extractor()\t)\r\n\t\t\t\t\t\tprocessor.save_pretrained(self.tmpdirname\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_tokenizer(bos_token=\"\"\"(BOS)\"\"\"\t, eos_token=\"\"\"(EOS)\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.get_feature_extractor(do_normalize=__A\t, padding_value=1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= ClapProcessor.from_pretrained(\r\n\t\t\t\t\t\t self.tmpdirname\t, bos_token=\"\"\"(BOS)\"\"\"\t, eos_token=\"\"\"(EOS)\"\"\"\t, do_normalize=__A\t, padding_value=1.0\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.tokenizer.get_vocab()\t, tokenizer_add_kwargs.get_vocab()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.tokenizer\t, __A\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.feature_extractor.to_json_string()\t, feature_extractor_add_kwargs.to_json_string()\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.feature_extractor\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= floats_list((3, 1000)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= feature_extractor(__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= processor(audios=__A\t, return_tensors=\"\"\"np\"\"\"\t)\r\n\r\n\t\t\t\t\t\tfor key in input_feat_extract.keys():\r\n\t\t\t\t\t\t\t\t\tself.assertAlmostEqual(input_feat_extract[key].sum()\t, input_processor[key].sum()\t, delta=1E-2\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 
self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"This is a test string\"\"\"\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= processor(text=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= tokenizer(__A\t)\r\n\r\n\t\t\t\t\t\tfor key in encoded_tok.keys():\r\n\t\t\t\t\t\t\t\t\tself.assertListEqual(encoded_tok[key]\t, encoded_processor[key]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= processor.batch_decode(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer.batch_decode(__A\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(__A\t, __A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_feature_extractor()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= ClapProcessor(tokenizer=__A\t, feature_extractor=__A\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(\r\n\t\t\t\t\t\t processor.model_input_names[2:]\t, feature_extractor.model_input_names\t, msg=\"\"\"`processor` and `feature_extractor` model input names do not match\"\"\"\t, )\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":694,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\n\r\nimport evaluate\r\nimport torch\r\nfrom datasets import load_dataset\r\nfrom torch.optim import AdamW\r\nfrom torch.utils.data import DataLoader\r\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\r\n\r\nfrom accelerate import Accelerator, DistributedType\r\n\r\n\r\n########################################################################\r\n# This is a fully working simple example to use Accelerate\r\n#\r\n# This example trains a Bert base model on GLUE MRPC\r\n# in any of the following settings (with the same script):\r\n# - single CPU or single GPU\r\n# - multi GPUS (using PyTorch distributed mode)\r\n# - (multi) TPUs\r\n# - fp16 (mixed-precision) or fp32 (normal precision)\r\n#\r\n# To run it in each of these various modes, follow the instructions\r\n# in the readme for examples:\r\n# https://github.com/huggingface/accelerate/tree/main/examples\r\n#\r\n########################################################################\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 16\r\n__UpperCAmelCase\t\t\t\t\t\t\t= 32\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Accelerator\t\t\t\t,\t\t\t\t\tlowercase__\t\t: int = 1_6\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 
AutoTokenizer.from_pretrained(\"\"\"bert-base-cased\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= load_dataset(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\tdef tokenize_function(lowercase__\t\t: Dict\t\t):\r\n\t\t\t\t\t\t# max_length=None => use the model max length (it's actually the default)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= tokenizer(examples[\"\"\"sentence1\"\"\"]\t\t\t\t,\t\t\t\t\texamples[\"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\ttruncation=UpperCamelCase__\t\t\t\t,\t\t\t\t\tmax_length=UpperCamelCase__\t\t)\r\n\t\t\t\t\t\treturn outputs\r\n\r\n\t\t\t# Apply the method we just defined to all the examples in all the splits of the dataset\r\n\t\t\t# starting with the main process first:\r\n\t\t\twith accelerator.main_process_first():\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= datasets.map(\r\n\t\t\t\t\t\t UpperCamelCase__\t\t\t\t,\t\t\t\t\tbatched=UpperCamelCase__\t\t\t\t,\t\t\t\t\tremove_columns=[\"\"\"idx\"\"\", \"\"\"sentence1\"\"\", \"\"\"sentence2\"\"\"]\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\r\n\t\t\t# transformers library\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= tokenized_datasets.rename_column(\"\"\"label\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"labels\"\"\"\t\t)\r\n\r\n\t\t\tdef collate_fn(lowercase__\t\t: Optional[int]\t\t):\r\n\t\t\t\t\t\t# On TPU it's best to pad everything to the same length or training will be very slow.\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None\r\n\t\t\t\t\t\t# When using mixed precision we want round multiples of 8/16\r\n\t\t\t\t\t\tif accelerator.mixed_precision == \"fp8\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 1_6\r\n\t\t\t\t\t\telif accelerator.mixed_precision != \"no\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= 8\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= None\r\n\r\n\t\t\t\t\t\treturn tokenizer.pad(\r\n\t\t\t\t\t\t UpperCamelCase__\t\t\t\t,\t\t\t\t\tpadding=\"\"\"longest\"\"\"\t\t\t\t,\t\t\t\t\tmax_length=UpperCamelCase__\t\t\t\t,\t\t\t\t\tpad_to_multiple_of=UpperCamelCase__\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"pt\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Instantiate dataloaders.\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"train\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=UpperCamelCase__\t\t\t\t,\t\t\t\t\tcollate_fn=UpperCamelCase__\t\t\t\t,\t\t\t\t\tbatch_size=UpperCamelCase__\t\t\t\t,\t\t\t\t\tdrop_last=UpperCamelCase__\t\t)\r\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= DataLoader(\r\n\t\t\t tokenized_datasets[\"\"\"validation\"\"\"]\t\t\t\t,\t\t\t\t\tshuffle=UpperCamelCase__\t\t\t\t,\t\t\t\t\tcollate_fn=UpperCamelCase__\t\t\t\t,\t\t\t\t\tbatch_size=UpperCamelCase__\t\t\t\t,\t\t\t\t\tdrop_last=(accelerator.mixed_precision == \"\"\"fp8\"\"\")\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\treturn train_dataloader, eval_dataloader\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: Any\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= Accelerator(cpu=args.cpu\t\t\t\t,\t\t\t\t\tmixed_precision=args.mixed_precision\t\t)\r\n\t\t\t# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 
config[\"\"\"lr\"\"\"]\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= int(config[\"\"\"num_epochs\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= int(config[\"\"\"seed\"\"\"]\t\t)\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= int(config[\"\"\"batch_size\"\"\"]\t\t)\r\n\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= evaluate.load(\"\"\"glue\"\"\"\t\t\t\t,\t\t\t\t\t\"\"\"mrpc\"\"\"\t\t)\r\n\r\n\t\t\t# If the batch size is too big we use gradient accumulation\r\n\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 1\r\n\t\t\tif batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= batch_size // MAX_GPU_BATCH_SIZE\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= MAX_GPU_BATCH_SIZE\r\n\r\n\t\t\tset_seed(UpperCamelCase__\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= get_dataloaders(UpperCamelCase__\t\t\t\t,\t\t\t\t\tUpperCamelCase__\t\t)\r\n\t\t\t# Instantiate the model (we build the model here so that the seed also control new weights initialization)\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= AutoModelForSequenceClassification.from_pretrained(\"\"\"bert-base-cased\"\"\"\t\t\t\t,\t\t\t\t\treturn_dict=UpperCamelCase__\t\t)\r\n\r\n\t\t\t# We could avoid this line since the accelerator is set with `device_placement=True` (default value).\r\n\t\t\t# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer\r\n\t\t\t# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).\r\n\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= model.to(accelerator.device\t\t)\r\n\t\t\t# Instantiate optimizer\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= AdamW(params=model.parameters()\t\t\t\t,\t\t\t\t\tlr=UpperCamelCase__\t\t)\r\n\r\n\t\t\t# Instantiate scheduler\r\n\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= get_linear_schedule_with_warmup(\r\n\t\t\t optimizer=UpperCamelCase__\t\t\t\t,\t\t\t\t\tnum_warmup_steps=1_0_0\t\t\t\t,\t\t\t\t\tnum_training_steps=(len(UpperCamelCase__\t\t) * num_epochs) // gradient_accumulation_steps\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t# Prepare everything\r\n\t\t\t# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\r\n\t\t\t# prepare method.\r\n\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :str \t\t\t\t\t= accelerator.prepare(\r\n\t\t\t UpperCamelCase__\t\t\t\t,\t\t\t\t\tUpperCamelCase__\t\t\t\t,\t\t\t\t\tUpperCamelCase__\t\t\t\t,\t\t\t\t\tUpperCamelCase__\t\t\t\t,\t\t\t\t\tUpperCamelCase__\t\t)\r\n\r\n\t\t\t# Now we train the model\r\n\t\t\tfor epoch in range(UpperCamelCase__\t\t):\r\n\t\t\t\t\t\tmodel.train()\r\n\t\t\t\t\t\tfor step, batch in enumerate(UpperCamelCase__\t\t):\r\n\t\t\t\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= model(**UpperCamelCase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= outputs.loss\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= loss / gradient_accumulation_steps\r\n\t\t\t\t\t\t\t\t\taccelerator.backward(UpperCamelCase__\t\t)\r\n\t\t\t\t\t\t\t\t\tif step % gradient_accumulation_steps == 
0:\r\n\t\t\t\t\t\t\t\t\t\t\t\toptimizer.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\tlr_scheduler.step()\r\n\t\t\t\t\t\t\t\t\t\t\t\toptimizer.zero_grad()\r\n\r\n\t\t\t\t\t\tmodel.eval()\r\n\t\t\t\t\t\tfor step, batch in enumerate(UpperCamelCase__\t\t):\r\n\t\t\t\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\r\n\t\t\t\t\t\t\t\t\tbatch.to(accelerator.device\t\t)\r\n\t\t\t\t\t\t\t\t\twith torch.no_grad():\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= model(**UpperCamelCase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= outputs.logits.argmax(dim=-1\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Dict \t\t\t\t\t= accelerator.gather_for_metrics((predictions, batch[\"\"\"labels\"\"\"])\t\t)\r\n\t\t\t\t\t\t\t\t\tmetric.add_batch(\r\n\t\t\t\t\t\t\t\t\t predictions=UpperCamelCase__\t\t\t\t,\t\t\t\t\treferences=UpperCamelCase__\t\t\t\t,\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= metric.compute()\r\n\t\t\t\t\t\t# Use accelerator.print to print only on the main process.\r\n\t\t\t\t\t\taccelerator.print(F\"\"\"epoch {epoch}:\"\"\"\t\t\t\t,\t\t\t\t\tUpperCamelCase__\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= argparse.ArgumentParser(description=\"\"\"Simple example of training script.\"\"\"\t\t)\r\n\t\t\tparser.add_argument(\r\n\t\t\t \"\"\"--mixed_precision\"\"\"\t\t\t\t,\t\t\t\t\ttype=UpperCamelCase__\t\t\t\t,\t\t\t\t\tdefault=UpperCamelCase__\t\t\t\t,\t\t\t\t\tchoices=[\"\"\"no\"\"\", \"\"\"fp16\"\"\", \"\"\"bf16\"\"\", \"\"\"fp8\"\"\"]\t\t\t\t,\t\t\t\t\thelp=\"\"\"Whether to use mixed precision. Choose\"\"\"\r\n\t\t\t \"\"\"between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.\"\"\"\r\n\t\t\t \"\"\"and an Nvidia Ampere GPU.\"\"\"\t\t\t\t,\t\t\t\t\t)\r\n\t\t\tparser.add_argument(\"\"\"--cpu\"\"\"\t\t\t\t,\t\t\t\t\taction=\"\"\"store_true\"\"\"\t\t\t\t,\t\t\t\t\thelp=\"\"\"If passed, will train on the CPU.\"\"\"\t\t)\r\n\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= parser.parse_args()\r\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= {\"\"\"lr\"\"\": 2E-5, \"\"\"num_epochs\"\"\": 3, \"\"\"seed\"\"\": 4_2, \"\"\"batch_size\"\"\": 1_6}\r\n\t\t\ttraining_function(UpperCamelCase__\t\t\t\t,\t\t\t\t\tUpperCamelCase__\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":362,"string":"362"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nfrom math import logaa\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: str = \"base_exp.txt\"\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :float \t\t\t\t\t= 0\r\n\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 0\r\n\t\t\tfor i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__\t\t)\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\t\t):\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= list(map(lowercase__\t\t\t\t,\t\t\t\t\tline.split(\"\"\",\"\"\"\t\t)\t\t)\t\t)\r\n\t\t\t\t\t\tif x * logaa(lowercase__\t\t) > largest:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= x * logaa(lowercase__\t\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= i + 1\r\n\t\t\treturn result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(solution())\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":695,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom collections import OrderedDict\r\nfrom typing import Mapping\r\n\r\nfrom packaging import version\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...onnx import OnnxConfig\r\nfrom ...utils import logging\r\nfrom ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n '''microsoft/swin-tiny-patch4-window7-224''': (\r\n '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''\r\n ),\r\n # See all Swin models at https://huggingface.co/models?filter=swin\r\n}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t):\r\n UpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\t'swin'\r\n\r\n UpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\t{\r\n 'num_attention_heads': 'num_heads',\r\n 'num_hidden_layers': 'num_layers',\r\n }\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __init__( self\t, __A=224\t, __A=4\t, __A=3\t, __A=96\t, __A=[2, 2, 6, 2]\t, __A=[3, 6, 12, 24]\t, __A=7\t, __A=4.0\t, __A=True\t, __A=0.0\t, __A=0.0\t, __A=0.1\t, __A=\"gelu\"\t, __A=False\t, __A=0.0_2\t, __A=1E-5\t, __A=32\t, __A=None\t, __A=None\t, **__A\t, ) ->\t\t\t\t\tDict:\r\n super().__init__(**_SCREAMING_SNAKE_CASE\t)\r\n\r\n lowerCAmelCase_ :Any \t\t\t\t\t= image_size\r\n lowerCAmelCase_ :Any \t\t\t\t\t= patch_size\r\n lowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= num_channels\r\n lowerCAmelCase_ :int \t\t\t\t\t= 
embed_dim\r\n lowerCAmelCase_ :Any \t\t\t\t\t= depths\r\n lowerCAmelCase_ :Tuple \t\t\t\t\t= len(_SCREAMING_SNAKE_CASE\t)\r\n lowerCAmelCase_ :Optional[Any] \t\t\t\t\t= num_heads\r\n lowerCAmelCase_ :List[str] \t\t\t\t\t= window_size\r\n lowerCAmelCase_ :int \t\t\t\t\t= mlp_ratio\r\n lowerCAmelCase_ :Any \t\t\t\t\t= qkv_bias\r\n lowerCAmelCase_ :int \t\t\t\t\t= hidden_dropout_prob\r\n lowerCAmelCase_ :Any \t\t\t\t\t= attention_probs_dropout_prob\r\n lowerCAmelCase_ :Tuple \t\t\t\t\t= drop_path_rate\r\n lowerCAmelCase_ :Tuple \t\t\t\t\t= hidden_act\r\n lowerCAmelCase_ :Any \t\t\t\t\t= use_absolute_embeddings\r\n lowerCAmelCase_ :Optional[Any] \t\t\t\t\t= layer_norm_eps\r\n lowerCAmelCase_ :Tuple \t\t\t\t\t= initializer_range\r\n lowerCAmelCase_ :Any \t\t\t\t\t= encoder_stride\r\n # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel\r\n # this indicates the channel dimension after the last stage of the model\r\n lowerCAmelCase_ :Optional[int] \t\t\t\t\t= int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE\t) - 1)\t)\r\n lowerCAmelCase_ :Dict \t\t\t\t\t= [\"stem\"] + [f\"\"\"stage{idx}\"\"\" for idx in range(1\t, len(_SCREAMING_SNAKE_CASE\t) + 1\t)]\r\n lowerCAmelCase_ :List[str] \t\t\t\t\t= get_aligned_output_features_output_indices(\r\n out_features=_SCREAMING_SNAKE_CASE\t, out_indices=_SCREAMING_SNAKE_CASE\t, stage_names=self.stage_names\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t):\r\n UpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\tversion.parse(\"1.11\"\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n @property\r\n def \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tMapping[str, Mapping[int, str]]:\r\n return OrderedDict(\r\n [\r\n (\"\"\"pixel_values\"\"\", {0: \"\"\"batch\"\"\", 1: \"\"\"num_channels\"\"\", 2: \"\"\"height\"\"\", 3: \"\"\"width\"\"\"}),\r\n ]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n @property\r\n def \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tfloat:\r\n return 1E-4\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":363,"string":"363"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport itertools\r\nimport math\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int\t\t) -> bool:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif 1 < number < 4:\r\n\t\t\t\t\t\t# 2 and 3 are primes\r\n\t\t\t\t\t\treturn True\r\n\t\t\telif number < 2 or number % 2 == 0 or number % 3 == 0:\r\n\t\t\t\t\t\t# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes\r\n\t\t\t\t\t\treturn False\r\n\r\n\t\t\t# All primes number are in format of 6k +/- 1\r\n\t\t\tfor i in range(5\t\t\t\t,\t\t\t\t\tint(math.sqrt(lowercase__\t\t) + 1\t\t)\t\t\t\t,\t\t\t\t\t6\t\t):\r\n\t\t\t\t\t\tif number % i == 0 or number % (i + 2) == 0:\r\n\t\t\t\t\t\t\t\t\treturn False\r\n\t\t\treturn True\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Dict:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\twhile True:\r\n\t\t\t\t\t\tif is_prime(lowercase__\t\t):\r\n\t\t\t\t\t\t\t\t\tyield num\r\n\t\t\t\t\t\tnum += 1\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 1_0_0_0_1\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\treturn next(itertools.islice(prime_generator()\t\t\t\t,\t\t\t\t\tnth - 1\t\t\t\t,\t\t\t\t\tlowercase__\t\t)\t\t)\r\n\r\n\r\nif __name__ == 
\"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":696,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import List, Optional, Union\r\n\r\nfrom ...processing_utils import ProcessorMixin\r\nfrom ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy\r\nfrom ...utils import TensorType\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( snake_case__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :Union[str, Any] \t\t\t=\t\t\t\t\t\t[\"image_processor\", \"tokenizer\"]\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t\"BridgeTowerImageProcessor\"\r\n\t\t\tUpperCAmelCase_ :Optional[int] \t\t\t=\t\t\t\t\t\t(\"RobertaTokenizer\", \"RobertaTokenizerFast\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A\t, __A\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tsuper().__init__(UpperCAmelCase_\t, UpperCAmelCase_\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __call__( self\t, __A\t, __A = None\t, __A = True\t, __A = False\t, __A = None\t, __A = None\t, __A = 0\t, __A = None\t, __A = None\t, __A = None\t, __A = False\t, __A = False\t, __A = False\t, __A = False\t, __A = True\t, __A = None\t, **__A\t, ) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= self.tokenizer(\r\n\t\t\t\t\t\t text=UpperCAmelCase_\t, add_special_tokens=UpperCAmelCase_\t, padding=UpperCAmelCase_\t, truncation=UpperCAmelCase_\t, max_length=UpperCAmelCase_\t, stride=UpperCAmelCase_\t, pad_to_multiple_of=UpperCAmelCase_\t, return_token_type_ids=UpperCAmelCase_\t, return_attention_mask=UpperCAmelCase_\t, return_overflowing_tokens=UpperCAmelCase_\t, return_special_tokens_mask=UpperCAmelCase_\t, return_offsets_mapping=UpperCAmelCase_\t, return_length=UpperCAmelCase_\t, verbose=UpperCAmelCase_\t, return_tensors=UpperCAmelCase_\t, **UpperCAmelCase_\t, )\r\n\t\t\t\t\t\t# add pixel_values + pixel_mask\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.image_processor(\r\n\t\t\t\t\t\t UpperCAmelCase_\t, return_tensors=UpperCAmelCase_\t, do_normalize=UpperCAmelCase_\t, do_center_crop=UpperCAmelCase_\t, **UpperCAmelCase_\t)\r\n\t\t\t\t\t\tencoding.update(UpperCAmelCase_\t)\r\n\r\n\t\t\t\t\t\treturn encoding\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, *__A\t, **__A\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\treturn self.tokenizer.batch_decode(*UpperCAmelCase_\t, **UpperCAmelCase_\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, *__A\t, **__A\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\treturn self.tokenizer.decode(*UpperCAmelCase_\t, **UpperCAmelCase_\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.tokenizer.model_input_names\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.image_processor.model_input_names\r\n\t\t\t\t\t\treturn list(dict.fromkeys(tokenizer_input_names + image_processor_input_names\t)\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":364,"string":"364"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: int = 5_0\t\t) -> int:\r\n\r\n\r\n\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tlowerCAmelCase_ :int 
\t\t\t\t\t= [1] * (length + 1)\r\n\r\n\t\t\tfor row_length in range(3\t\t\t\t,\t\t\t\t\tlength + 1\t\t):\r\n\t\t\t\t\t\tfor block_length in range(3\t\t\t\t,\t\t\t\t\trow_length + 1\t\t):\r\n\t\t\t\t\t\t\t\t\tfor block_start in range(row_length - block_length\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tways_number[row_length] += ways_number[\r\n\t\t\t\t\t\t\t\t\t\t\t\t row_length - block_start - block_length - 1\r\n\t\t\t\t\t\t\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t\t\t\tways_number[row_length] += 1\r\n\r\n\t\t\treturn ways_number[length]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\tprint(F\"\"\"{solution() = }\"\"\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":697,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\nimport sys\r\nfrom contextlib import contextmanager\r\n\r\n\r\n# Windows only\r\nif os.name == \"nt\":\r\n import ctypes\r\n import msvcrt # noqa\r\n\r\n\r\n\r\n\r\n class _SCREAMING_SNAKE_CASE ( ctypes.Structure\t\t\t\t\t\t\t):\r\n UpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\t[(\"\"\"size\"\"\", ctypes.c_int), (\"\"\"visible\"\"\", ctypes.c_byte)]\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> str:\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n if os.name == \"nt\":\r\n lowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CursorInfo()\r\n lowerCAmelCase_ :Dict \t\t\t\t\t= ctypes.windll.kernelaa.GetStdHandle(-1_1\t\t)\r\n ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE_\t\t\t\t,\t\t\t\t\tctypes.byref(SCREAMING_SNAKE_CASE_\t\t)\t\t)\r\n lowerCAmelCase_ :List[Any] \t\t\t\t\t= False\r\n ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE_\t\t\t\t,\t\t\t\t\tctypes.byref(SCREAMING_SNAKE_CASE_\t\t)\t\t)\r\n elif os.name == \"posix\":\r\n sys.stdout.write(\"\"\"\\033[?25l\"\"\"\t\t)\r\n sys.stdout.flush()\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> Optional[int]:\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n if os.name == \"nt\":\r\n lowerCAmelCase_ :Dict \t\t\t\t\t= CursorInfo()\r\n lowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ctypes.windll.kernelaa.GetStdHandle(-1_1\t\t)\r\n ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE_\t\t\t\t,\t\t\t\t\tctypes.byref(SCREAMING_SNAKE_CASE_\t\t)\t\t)\r\n lowerCAmelCase_ :Tuple \t\t\t\t\t= True\r\n ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE_\t\t\t\t,\t\t\t\t\tctypes.byref(SCREAMING_SNAKE_CASE_\t\t)\t\t)\r\n elif os.name == \"posix\":\r\n sys.stdout.write(\"\"\"\\033[?25h\"\"\"\t\t)\r\n sys.stdout.flush()\r\n\r\n\r\n\r\n\r\n\r\n\r\n@contextmanager\r\ndef \t\t\t\t\t\t_snake_case\t( ) -> str:\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n try:\r\n hide_cursor()\r\n yield\r\n finally:\r\n show_cursor()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":365,"string":"365"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/\r\n\r\nimport gc\r\nimport random\r\nimport tempfile\r\nimport unittest\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom PIL import Image\r\nfrom transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer\r\n\r\nfrom diffusers import (\r\n AutoencoderKL,\r\n ControlNetModel,\r\n DDIMScheduler,\r\n StableDiffusionControlNetImgaImgPipeline,\r\n 
UNetaDConditionModel,\r\n)\r\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel\r\nfrom diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device\r\nfrom diffusers.utils.import_utils import is_xformers_available\r\nfrom diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu\r\n\r\nfrom ..pipeline_params import (\r\n IMAGE_TO_IMAGE_IMAGE_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,\r\n TEXT_GUIDED_IMAGE_VARIATION_PARAMS,\r\n)\r\nfrom ..test_pipelines_common import (\r\n PipelineKarrasSchedulerTesterMixin,\r\n PipelineLatentTesterMixin,\r\n PipelineTesterMixin,\r\n)\r\n\r\n\r\nenable_full_determinism()\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS.union({\"control_image\"}\t\t\t\t\t\t\t)\r\n\t\t\tUpperCAmelCase_ :Optional[Any] \t\t\t=\t\t\t\t\t\tIMAGE_TO_IMAGE_IMAGE_PARAMS\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t 
\"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= 2\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= floats_tensor(control_image.shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\tStableDiffusionControlNetImgaImgPipeline\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_PARAMS - {\"height\", \"width\"}\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\tTEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS\r\n\t\t\tUpperCAmelCase_ :int \t\t\t=\t\t\t\t\t\tfrozenset([]\t\t\t\t\t\t\t) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= UNetaDConditionModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, sample_size=32\t, in_channels=4\t, out_channels=4\t, 
down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, up_block_types=(\"\"\"CrossAttnUpBlock2D\"\"\", \"\"\"UpBlock2D\"\"\")\t, cross_attention_dim=32\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\r\n\t\t\t\t\t\tdef init_weights(__A\t):\r\n\t\t\t\t\t\t\t\t\tif isinstance(__A\t, torch.nn.Convad\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\ttorch.nn.init.normal(m.weight\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tm.bias.data.fill_(1.0\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= ControlNetModel(\r\n\t\t\t\t\t\t block_out_channels=(32, 64)\t, layers_per_block=2\t, in_channels=4\t, down_block_types=(\"\"\"DownBlock2D\"\"\", \"\"\"CrossAttnDownBlock2D\"\"\")\t, cross_attention_dim=32\t, conditioning_embedding_out_channels=(16, 32)\t, )\r\n\t\t\t\t\t\tcontrolneta.controlnet_down_blocks.apply(__A\t)\r\n\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= DDIMScheduler(\r\n\t\t\t\t\t\t beta_start=0.0_0_0_8_5\t, beta_end=0.0_1_2\t, beta_schedule=\"\"\"scaled_linear\"\"\"\t, clip_sample=__A\t, set_alpha_to_one=__A\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= AutoencoderKL(\r\n\t\t\t\t\t\t block_out_channels=[32, 64]\t, in_channels=3\t, out_channels=3\t, down_block_types=[\"\"\"DownEncoderBlock2D\"\"\", \"\"\"DownEncoderBlock2D\"\"\"]\t, up_block_types=[\"\"\"UpDecoderBlock2D\"\"\", \"\"\"UpDecoderBlock2D\"\"\"]\t, latent_channels=4\t, )\r\n\t\t\t\t\t\ttorch.manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= CLIPTextConfig(\r\n\t\t\t\t\t\t bos_token_id=0\t, eos_token_id=2\t, hidden_size=32\t, intermediate_size=37\t, layer_norm_eps=1E-05\t, num_attention_heads=4\t, num_hidden_layers=5\t, pad_token_id=1\t, vocab_size=1000\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTextModel(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= CLIPTokenizer.from_pretrained(\"\"\"hf-internal-testing/tiny-random-clip\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= MultiControlNetModel([controlneta, controlneta]\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"unet\"\"\": unet,\r\n\t\t\t\t\t\t \"\"\"controlnet\"\"\": controlnet,\r\n\t\t\t\t\t\t \"\"\"scheduler\"\"\": scheduler,\r\n\t\t\t\t\t\t \"\"\"vae\"\"\": vae,\r\n\t\t\t\t\t\t \"\"\"text_encoder\"\"\": text_encoder,\r\n\t\t\t\t\t\t \"\"\"tokenizer\"\"\": tokenizer,\r\n\t\t\t\t\t\t \"\"\"safety_checker\"\"\": None,\r\n\t\t\t\t\t\t \"\"\"feature_extractor\"\"\": None,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn components\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A=0\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tif str(__A\t).startswith(\"\"\"mps\"\"\"\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.manual_seed(__A\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.Generator(device=__A\t).manual_seed(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= 2\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [\r\n\t\t\t\t\t\t 
randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t randn_tensor(\r\n\t\t\t\t\t\t (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor)\t, generator=__A\t, device=torch.device(__A\t)\t, ),\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= floats_tensor(control_image[0].shape\t, rng=random.Random(__A\t)\t).to(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= image.cpu().permute(0\t, 2\t, 3\t, 1\t)[0]\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= Image.fromarray(np.uinta(__A\t)\t).convert(\"\"\"RGB\"\"\"\t).resize((64, 64)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= {\r\n\t\t\t\t\t\t \"\"\"prompt\"\"\": \"\"\"A painting of a squirrel eating a burger\"\"\",\r\n\t\t\t\t\t\t \"\"\"generator\"\"\": generator,\r\n\t\t\t\t\t\t \"\"\"num_inference_steps\"\"\": 2,\r\n\t\t\t\t\t\t \"\"\"guidance_scale\"\"\": 6.0,\r\n\t\t\t\t\t\t \"\"\"output_type\"\"\": \"\"\"numpy\"\"\",\r\n\t\t\t\t\t\t \"\"\"image\"\"\": image,\r\n\t\t\t\t\t\t \"\"\"control_image\"\"\": control_image,\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\treturn inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 1_0.0\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= 4\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(**__A\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.1\t, control_guidance_end=0.2\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=[0.1, 0.3]\t, control_guidance_end=[0.2, 0.7]\t)[0]\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= self.get_dummy_inputs(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= steps\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scale\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= pipe(**__A\t, control_guidance_start=0.4\t, control_guidance_end=[0.5, 0.8]\t)[0]\r\n\r\n\t\t\t\t\t\t# make sure that all outputs are different\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\t\t\t\t\t\tassert np.sum(np.abs(output_a - output_a\t)\t) > 1E-3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\n\t\t\t\t\t\treturn self._test_attention_slicing_forward_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@unittest.skipIf(\r\n\t\t\t torch_device != \"\"\"cuda\"\"\" or not is_xformers_available()\t, reason=\"\"\"XFormers 
attention is only available with CUDA and `xformers` installed\"\"\"\t, )\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tTuple:\r\n\t\t\t\t\t\tself._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tself._test_inference_batch_single_identical(expected_max_diff=2E-3\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.get_dummy_components()\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.pipeline_class(**__A\t)\r\n\t\t\t\t\t\tpipe.to(__A\t)\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdir:\r\n\t\t\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\t\t\t\t\t# save_pretrained is not implemented for Multi-ControlNet\r\n\t\t\t\t\t\t\t\t\t\t\t\tpipe.save_pretrained(__A\t)\r\n\t\t\t\t\t\t\t\t\texcept NotImplementedError:\r\n\t\t\t\t\t\t\t\t\t\t\t\tpass\r\n\r\n\r\n\r\n\r\n@slow\r\n@require_torch_gpu\r\nclass _SCREAMING_SNAKE_CASE ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\tsuper().tearDown()\r\n\t\t\t\t\t\tgc.collect()\r\n\t\t\t\t\t\ttorch.cuda.empty_cache()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tstr:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= ControlNetModel.from_pretrained(\"\"\"lllyasviel/sd-controlnet-canny\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= StableDiffusionControlNetImgaImgPipeline.from_pretrained(\r\n\t\t\t\t\t\t \"\"\"runwayml/stable-diffusion-v1-5\"\"\"\t, safety_checker=__A\t, controlnet=__A\t)\r\n\t\t\t\t\t\tpipe.enable_model_cpu_offload()\r\n\t\t\t\t\t\tpipe.set_progress_bar_config(disable=__A\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= torch.Generator(device=\"\"\"cpu\"\"\"\t).manual_seed(0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= \"\"\"evil space-punk bird\"\"\"\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png\"\"\"\t).resize((512, 512)\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= load_image(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png\"\"\"\t).resize((512, 512)\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= pipe(\r\n\t\t\t\t\t\t __A\t, __A\t, control_image=__A\t, generator=__A\t, output_type=\"\"\"np\"\"\"\t, num_inference_steps=50\t, strength=0.6\t, )\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= output.images[0]\r\n\r\n\t\t\t\t\t\tassert image.shape == (512, 512, 3)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= load_numpy(\r\n\t\t\t\t\t\t \"\"\"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy\"\"\"\t)\r\n\r\n\t\t\t\t\t\tassert np.abs(expected_image - image\t).max() < 9E-2\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":698,"cells":{"code":{"kind":"string","value":"\r\"\"\"simple docstring\"\"\"\r\r\r\r\r\r\rimport re\rfrom pathlib import Path\rfrom unittest import TestCase\r\rimport pytest\r\r\r\r\r@pytest.mark.integration\rclass _SCREAMING_SNAKE_CASE ( 
UpperCamelCase__\t\t\t\t\t\t\t):\r\r\r\r\r\r\r\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tList[str]:\r\r\t\t\t\t\t\twith open(__lowerCamelCase\t, encoding=\"\"\"utf-8\"\"\"\t) as input_file:\r\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= re.compile(r\"\"\"(?!.*\\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\\b)(?<=\\s)(open)\\((.*)\\)\"\"\"\t)\r\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= input_file.read()\r\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= regexp.search(__lowerCamelCase\t)\r\r\t\t\t\t\t\treturn match\r\r\r\r\r\r\r\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tstr:\r\r\t\t\t\t\t\twith open(__lowerCamelCase\t, encoding=\"\"\"utf-8\"\"\"\t) as input_file:\r\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= re.compile(r\"\"\"#[^\\r\\n]*print\\(|\\\"[^\\r\\n]*print\\(|\\\"\\\"\\\".*?print\\(.*?\\\"\\\"\\\"|(print\\()\"\"\"\t, re.DOTALL\t)\r\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= input_file.read()\r\t\t\t\t\t\t\t\t\t# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`\r\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= regexp.finditer(__lowerCamelCase\t)\r\r\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [match for match in matches if match is not None and match.group(1\t) is not None]\r\t\t\t\t\t\treturn matches[0] if matches else None\r\r\r\r\r\r\r\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tUnion[str, Any]:\r\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= Path(\"\"\"./datasets\"\"\"\t)\r\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= list(dataset_paths.absolute().glob(\"\"\"**/*.py\"\"\"\t)\t)\r\r\t\t\t\t\t\tfor dataset in dataset_files:\r\t\t\t\t\t\t\t\t\tif self._no_encoding_on_file_open(str(__lowerCamelCase\t)\t):\r\t\t\t\t\t\t\t\t\t\t\t\traise AssertionError(f\"\"\"open(...) must use utf-8 encoding in {dataset}\"\"\"\t)\r\r\r\r\r\r\r\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict:\r\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= Path(\"\"\"./datasets\"\"\"\t)\r\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= list(dataset_paths.absolute().glob(\"\"\"**/*.py\"\"\"\t)\t)\r\r\t\t\t\t\t\tfor dataset in dataset_files:\r\t\t\t\t\t\t\t\t\tif self._no_print_statements(str(__lowerCamelCase\t)\t):\r\t\t\t\t\t\t\t\t\t\t\t\traise AssertionError(f\"\"\"print statement found in {dataset}. 
Use datasets.logger/logging instead.\"\"\"\t)\r\r\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":366,"string":"366"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import Optional\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom transformers import GPTaConfig, GPTaLMHeadModel\r\nfrom transformers.modeling_utils import ModuleUtilsMixin\r\n\r\nfrom ...configuration_utils import ConfigMixin, register_to_config\r\nfrom ...models import ModelMixin\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__ ,\t\t\tA__ ,\t\t\tA__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[str] \t\t\t=\t\t\t\t\t\t[r\"h\\.\\d+\\.attn\\.bias\", r\"h\\.\\d+\\.attn\\.masked_bias\"]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@register_to_config\r\n\t\t\tdef __init__( self\t, __A\t, __A\t, __A = None\t, __A = 5_0257\t, __A = 1024\t, __A = 768\t, __A = 12\t, __A = 12\t, __A = None\t, __A = \"gelu_new\"\t, __A = 0.1\t, __A = 0.1\t, __A = 0.1\t, __A = 1E-5\t, __A = 0.0_2\t, __A = True\t, __A = True\t, __A = False\t, __A = False\t, ) ->\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\tsuper().__init__()\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= prefix_length\r\n\r\n\t\t\t\t\t\tif prefix_inner_dim != n_embd and prefix_hidden_dim is None:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t f\"\"\"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and\"\"\"\r\n\t\t\t\t\t\t\t\t\t f\"\"\" `n_embd`: {n_embd} are not equal.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= prefix_inner_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= prefix_hidden_dim\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= (\r\n\t\t\t\t\t\t nn.Linear(self.prefix_inner_dim\t, self.prefix_hidden_dim\t)\r\n\t\t\t\t\t\t if self.prefix_hidden_dim is not None\r\n\t\t\t\t\t\t else nn.Identity()\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= (\r\n\t\t\t\t\t\t nn.Linear(self.prefix_hidden_dim\t, __A\t) if self.prefix_hidden_dim is not None else nn.Identity()\r\n\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= GPTaConfig(\r\n\t\t\t\t\t\t vocab_size=__A\t, n_positions=__A\t, n_embd=__A\t, n_layer=__A\t, n_head=__A\t, n_inner=__A\t, activation_function=__A\t, resid_pdrop=__A\t, embd_pdrop=__A\t, attn_pdrop=__A\t, layer_norm_epsilon=__A\t, initializer_range=__A\t, scale_attn_weights=__A\t, use_cache=__A\t, scale_attn_by_inverse_layer_idx=__A\t, reorder_and_upcast_attn=__A\t, )\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= GPTaLMHeadModel(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A = None\t, __A = None\t, ) ->\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.transformer.transformer.wte(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= self.encode_prefix(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.decode_prefix(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.cat((prefix_embeds, embedding_text)\t, dim=1\t)\r\n\r\n\t\t\t\t\t\tif labels is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= self.get_dummy_token(input_ids.shape[0]\t, input_ids.device\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= torch.cat((dummy_token, input_ids)\t, dim=1\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.transformer(inputs_embeds=__A\t, labels=__A\t, attention_mask=__A\t)\r\n\t\t\t\t\t\tif self.prefix_hidden_dim is 
not None:\r\n\t\t\t\t\t\t\t\t\treturn out, hidden\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\treturn out\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t) ->\t\t\t\t\ttorch.Tensor:\r\n\t\t\t\t\t\treturn torch.zeros(__A\t, self.prefix_length\t, dtype=torch.intaa\t, device=__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\treturn self.encode_prefix(__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@torch.no_grad()\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A\t, __A\t, __A\t) ->\t\t\t\t\tOptional[int]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.split(__A\t, 1\t, dim=0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= []\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= []\r\n\t\t\t\t\t\tfor feature in features:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= self.decode_prefix(feature.to(__A\t)\t) # back to the clip feature\r\n\t\t\t\t\t\t\t\t\t# Only support beam search for now\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= self.generate_beam(\r\n\t\t\t\t\t\t\t\t\t input_embeds=__A\t, device=__A\t, eos_token_id=__A\t)\r\n\t\t\t\t\t\t\t\t\tgenerated_tokens.append(output_tokens[0]\t)\r\n\t\t\t\t\t\t\t\t\tgenerated_seq_lengths.append(seq_lengths[0]\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.stack(__A\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= torch.stack(__A\t)\r\n\t\t\t\t\t\treturn generated_tokens, generated_seq_lengths\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@torch.no_grad()\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t, __A=None\t, __A=None\t, __A=None\t, __A = 5\t, __A = 67\t, __A = 1.0\t, __A = None\t, ) ->\t\t\t\t\tUnion[str, Any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= eos_token_id\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= None\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= torch.ones(__A\t, device=__A\t, dtype=torch.int\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= torch.zeros(__A\t, device=__A\t, dtype=torch.bool\t)\r\n\r\n\t\t\t\t\t\tif input_embeds is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= input_embeds\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= self.transformer.transformer.wte(__A\t)\r\n\r\n\t\t\t\t\t\tfor i in range(__A\t):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= self.transformer(inputs_embeds=__A\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= outputs.logits\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= logits[:, -1, :] / (temperature if temperature > 0 else 1.0)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= logits.softmax(-1\t).log()\r\n\r\n\t\t\t\t\t\t\t\t\tif scores is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Any \t\t\t\t\t= logits.topk(__A\t, -1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= generated.expand(__A\t, *generated.shape[1:]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :List[str] \t\t\t\t\t= next_tokens.permute(1\t, 0\t), scores.squeeze(0\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tif tokens is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= next_tokens\r\n\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= tokens.expand(__A\t, 
*tokens.shape[1:]\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= torch.cat((tokens, next_tokens)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= -float(np.inf\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= 0\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= scores[:, None] + logits\r\n\t\t\t\t\t\t\t\t\t\t\t\tseq_lengths[~is_stopped] += 1\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= scores_sum / seq_lengths[:, None]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Tuple \t\t\t\t\t= scores_sum_average.view(-1\t).topk(__A\t, -1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= next_tokens // scores_sum.shape[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= seq_lengths[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= next_tokens % scores_sum.shape[1]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= next_tokens.unsqueeze(1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= tokens[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.cat((tokens, next_tokens)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= generated[next_tokens_source]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= scores_sum_average * seq_lengths\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= is_stopped[next_tokens_source]\r\n\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.transformer.transformer.wte(next_tokens.squeeze()\t).view(generated.shape[0]\t, 1\t, -1\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= torch.cat((generated, next_token_embed)\t, dim=1\t)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= is_stopped + next_tokens.eq(__A\t).squeeze()\r\n\t\t\t\t\t\t\t\t\tif is_stopped.all():\r\n\t\t\t\t\t\t\t\t\t\t\t\tbreak\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= scores / seq_lengths\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= scores.argsort(descending=__A\t)\r\n\t\t\t\t\t\t# tokens tensors are already padded to max_seq_length\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= [tokens[i] for i in order]\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= torch.stack(__A\t, dim=0\t)\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= torch.tensor([seq_lengths[i] for i in order]\t, dtype=seq_lengths.dtype\t)\r\n\t\t\t\t\t\treturn output_texts, seq_lengths\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":699,"cells":{"code":{"kind":"string","value":"\n\"\"\"simple docstring\"\"\"\n\n\n\n\n\n\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Optional[int]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Tuple\t\t) -> Union[str, Any]:\n\n\n\t\t\t'''simple docstring'''\n\n\n\n\n\n\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= [False] * len(__lowerCamelCase\t\t)\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= []\n\t\t\tqueue.append(__lowerCamelCase\t\t)\n\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= True\n\n\t\t\twhile queue:\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= queue.pop(0\t\t)\n\t\t\t\t\t\tfor ind in range(len(graph[u]\t\t)\t\t):\n\t\t\t\t\t\t\t\t\tif visited[ind] is False and graph[u][ind] > 
0:\n\t\t\t\t\t\t\t\t\t\t\t\tqueue.append(__lowerCamelCase\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= True\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= u\n\n\t\t\treturn visited[t]\n\n\n\n\n\n\ndef \t\t\t\t\t\t_snake_case\t( lowercase__\t\t: List[str]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: List[Any]\t\t\t\t,\t\t\t\t\tlowercase__\t\t: Dict\t\t) -> Dict:\n\n\n\t\t\t'''simple docstring'''\n\n\n\n\n\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= [-1] * (len(__lowerCamelCase\t\t))\n\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= 0\n\t\t\twhile bfs(__lowerCamelCase\t\t\t\t,\t\t\t\t\t__lowerCamelCase\t\t\t\t,\t\t\t\t\t__lowerCamelCase\t\t\t\t,\t\t\t\t\t__lowerCamelCase\t\t):\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= float(\"\"\"Inf\"\"\"\t\t)\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= sink\n\n\t\t\t\t\t\twhile s != source:\n\t\t\t\t\t\t\t\t\t# Find the minimum value in select path\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= min(__lowerCamelCase\t\t\t\t,\t\t\t\t\tgraph[parent[s]][s]\t\t)\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= parent[s]\n\n\t\t\t\t\t\tmax_flow += path_flow\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= sink\n\n\t\t\t\t\t\twhile v != source:\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= parent[v]\n\t\t\t\t\t\t\t\t\tgraph[u][v] -= path_flow\n\t\t\t\t\t\t\t\t\tgraph[v][u] += path_flow\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= parent[v]\n\t\t\treturn max_flow\n\n\n__UpperCAmelCase\t\t\t\t\t\t\t= [\n [0, 16, 13, 0, 0, 0],\n [0, 0, 10, 12, 0, 0],\n [0, 4, 0, 0, 14, 0],\n [0, 0, 9, 0, 0, 20],\n [0, 0, 0, 7, 0, 4],\n [0, 0, 0, 0, 0, 0],\n]\n\n__UpperCAmelCase\t\t\t,\t\t\t__UpperCAmelCase\t\t\t\t\t\t\t= 0, 5\n\n\n\n\nprint(ford_fulkerson(graph, source, sink))\n\n\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":367,"string":"367"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport copy\r\nfrom collections import OrderedDict\r\nfrom typing import Dict, Mapping\r\n\r\nfrom packaging import version\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...onnx import OnnxConfig\r\nfrom ...utils import logging\r\nfrom ..auto import CONFIG_MAPPING\r\n\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n__UpperCAmelCase\t\t\t\t\t\t\t= {\r\n 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',\r\n # See all DETR models at https://huggingface.co/models?filter=detr\r\n}\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t\"detr\"\r\n\t\t\tUpperCAmelCase_ :str \t\t\t=\t\t\t\t\t\t[\"past_key_values\"]\r\n\t\t\tUpperCAmelCase_ :Tuple \t\t\t=\t\t\t\t\t\t{\r\n\t\t\t \"hidden_size\": \"d_model\",\r\n\t\t\t \"num_attention_heads\": \"encoder_attention_heads\",\r\n\t\t\t}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef __init__( self\t, __A=True\t, __A=None\t, __A=3\t, __A=100\t, __A=6\t, __A=2048\t, __A=8\t, __A=6\t, __A=2048\t, __A=8\t, __A=0.0\t, __A=0.0\t, __A=True\t, __A=\"relu\"\t, __A=256\t, __A=0.1\t, __A=0.0\t, __A=0.0\t, __A=0.0_2\t, __A=1.0\t, __A=False\t, __A=\"sine\"\t, __A=\"resnet50\"\t, __A=True\t, __A=False\t, __A=1\t, __A=5\t, __A=2\t, __A=1\t, __A=1\t, __A=5\t, __A=2\t, __A=0.1\t, **__A\t, ) ->\t\t\t\t\tList[Any]:\r\n\t\t\t\t\t\tif backbone_config is not None and use_timm_backbone:\r\n\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"You can't specify both 
`backbone_config` and `use_timm_backbone`.\"\"\"\t)\r\n\r\n\t\t\t\t\t\tif not use_timm_backbone:\r\n\t\t\t\t\t\t\t\t\tif backbone_config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= CONFIG_MAPPING[\"\"\"resnet\"\"\"](out_features=[\"\"\"stage4\"\"\"]\t)\r\n\t\t\t\t\t\t\t\t\telif isinstance(__A\t, __A\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= backbone_config.get(\"\"\"model_type\"\"\"\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= CONFIG_MAPPING[backbone_model_type]\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= config_class.from_dict(__A\t)\r\n\t\t\t\t\t\t\t\t\t# set timm attributes to None\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= None, None, None\r\n\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= use_timm_backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= backbone_config\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= num_channels\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= num_queries\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= d_model\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= encoder_ffn_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= encoder_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[Any] \t\t\t\t\t= decoder_ffn_dim\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= decoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= decoder_attention_heads\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= attention_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= activation_dropout\r\n\t\t\t\t\t\tlowerCAmelCase_ :Any \t\t\t\t\t= activation_function\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= init_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= init_xavier_std\r\n\t\t\t\t\t\tlowerCAmelCase_ :int \t\t\t\t\t= encoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= decoder_layerdrop\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= auxiliary_loss\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= position_embedding_type\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= use_pretrained_backbone\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= dilation\r\n\t\t\t\t\t\t# Hungarian matcher\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[Any] \t\t\t\t\t= class_cost\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= bbox_cost\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= giou_cost\r\n\t\t\t\t\t\t# Loss coefficients\r\n\t\t\t\t\t\tlowerCAmelCase_ :Optional[int] \t\t\t\t\t= mask_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Union[str, Any] \t\t\t\t\t= dice_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= bbox_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Tuple \t\t\t\t\t= giou_loss_coefficient\r\n\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= eos_coefficient\r\n\t\t\t\t\t\tsuper().__init__(is_encoder_decoder=__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn 
self.encoder_attention_heads\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn self.d_model\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@classmethod\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( cls\t, __A\t, **__A\t) ->\t\t\t\t\tAny:\r\n\t\t\t\t\t\treturn cls(backbone_config=__A\t, **__A\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tDict[str, any]:\r\n\t\t\t\t\t\tlowerCAmelCase_ :List[str] \t\t\t\t\t= copy.deepcopy(self.__dict__\t)\r\n\t\t\t\t\t\tif output[\"backbone_config\"] is not None:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_ :Dict \t\t\t\t\t= self.backbone_config.to_dict()\r\n\t\t\t\t\t\tlowerCAmelCase_ :str \t\t\t\t\t= self.__class__.model_type\r\n\t\t\t\t\t\treturn output\r\n\r\n\r\n\r\n\r\nclass _SCREAMING_SNAKE_CASE ( A__\t\t\t\t\t\t\t):\r\n\t\t\tUpperCAmelCase_ :List[Any] \t\t\t=\t\t\t\t\t\tversion.parse(\"1.11\"\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tMapping[str, Mapping[int, str]]:\r\n\t\t\t\t\t\treturn OrderedDict(\r\n\t\t\t\t\t\t [\r\n\t\t\t\t\t\t (\"\"\"pixel_values\"\"\", {0: \"\"\"batch\"\"\", 1: \"\"\"num_channels\"\"\", 2: \"\"\"height\"\"\", 3: \"\"\"width\"\"\"}),\r\n\t\t\t\t\t\t (\"\"\"pixel_mask\"\"\", {0: \"\"\"batch\"\"\"}),\r\n\t\t\t\t\t\t ]\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tfloat:\r\n\t\t\t\t\t\treturn 1E-5\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t@property\r\n\t\t\tdef \t\t\t__lowerCAmelCase\t\t\t( self\t) ->\t\t\t\t\tint:\r\n\t\t\t\t\t\treturn 12\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":1,"string":"1"},"label":{"kind":"number","value":0,"string":"0"}}}]
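The escaped records above are raw rows as served by the Hugging Face datasets-server rows API; the page metadata shows this view starting at offset 600 with a page length of 100. Below is a minimal sketch of fetching the same page directly. The dataset id is taken from this page's metadata, the "default" config and "train" split names are assumptions, and the public API exposes row_idx/row fields rather than the viewer-internal rowIdx/cells props embedded above.

import requests

# Sketch: fetch one page of rows from the datasets-server rows API.
# Dataset id comes from this page's metadata; config/split names are assumed.
url = "https://datasets-server.huggingface.co/rows"
params = {
    "dataset": "infinityofspace/python_codestyles-single-500",
    "config": "default",  # assumed config name
    "split": "train",     # assumed split name
    "offset": 600,        # this page of the viewer starts at row 600
    "length": 100,
}
payload = requests.get(url, params=params).json()
for row in payload["rows"][:3]:
    cells = row["row"]
    print(row["row_idx"], cells["code_codestyle"], cells["style_context_codestyle"], cells["label"])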
Columns: code (string, lengths 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, lengths 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1)
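Read together with the row values above, the label column appears to mark whether the two snippets share a code style: every row on this page has label 1 exactly when code_codestyle equals style_context_codestyle (codestyles 1/1 give label 1, while 362/1 give label 0). A small sketch that sanity-checks this reading with the datasets library; the dataset id is taken from this page's metadata and the split name is an assumption.

from datasets import load_dataset

# Sketch: verify the assumed meaning of `label` on a handful of rows.
ds = load_dataset("infinityofspace/python_codestyles-single-500", split="train")  # split name assumed

for row in ds.select(range(10)):
    same_style = int(row["code_codestyle"] == row["style_context_codestyle"])
    # Expected to hold if `label` really means "both snippets share a codestyle":
    assert row["label"] == same_style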
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :int = LongformerTokenizer UpperCAmelCase_ :Union[str, Any] = True UpperCAmelCase_ :Optional[int] = LongformerTokenizerFast UpperCAmelCase_ :Tuple = True def __lowerCAmelCase ( self ) -> int: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :int = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCAmelCase_ :Union[str, Any] = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCAmelCase_ :Optional[Any] = {"""unk_token""": """<unk>"""} lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , **__A ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , **__A ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> List[str]: lowerCAmelCase_ :List[str] = """lower newer""" lowerCAmelCase_ :List[str] = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase_ :Dict = """lower newer""" lowerCAmelCase_ :int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A ) # , add_prefix_space=True) self.assertListEqual(__A , __A ) lowerCAmelCase_ :int = tokens + [tokenizer.unk_token] lowerCAmelCase_ :Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__A ) , [0, 3_1414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=__A ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , ) @slow def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :List[Any] = tokenizer.encode( """sequence builders""" , add_special_tokens=__A , add_prefix_space=__A ) lowerCAmelCase_ :Dict = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=__A , add_prefix_space=__A ) lowerCAmelCase_ :List[Any] = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :Dict = tokenizer.build_inputs_with_special_tokens(__A , __A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Any = """Encode this sequence.""" lowerCAmelCase_ :Any = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments lowerCAmelCase_ :List[Any] = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A ) lowerCAmelCase_ :Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__A , __A ) lowerCAmelCase_ :Dict = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A ) lowerCAmelCase_ :str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__A , __A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) lowerCAmelCase_ :List[Any] = tokenizer.encode(__A , add_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__A , __A ) # Testing spaces after special tokens lowerCAmelCase_ :Dict = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(__A , lstrip=__A , rstrip=__A )} ) # mask token has a left space lowerCAmelCase_ :Any = tokenizer.convert_tokens_to_ids(__A ) lowerCAmelCase_ :Tuple = """Encode <mask> sequence""" lowerCAmelCase_ :Optional[Any] = """Encode <mask>sequence""" lowerCAmelCase_ :str = tokenizer.encode(__A ) lowerCAmelCase_ :Dict = encoded.index(__A ) lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__A , __A ) lowerCAmelCase_ :Dict = tokenizer.encode(__A ) lowerCAmelCase_ :List[str] = encoded.index(__A ) lowerCAmelCase_ :Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[str]: pass def __lowerCAmelCase ( self ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase_ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__A , **__A ) lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained(__A , **__A ) lowerCAmelCase_ :Union[str, Any] = """A, <mask> AllenNLP sentence.""" lowerCAmelCase_ :str = tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A ) lowerCAmelCase_ :Optional[int] = tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so 
sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) lowerCAmelCase_ :Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) lowerCAmelCase_ :Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( __A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( __A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def __lowerCAmelCase ( self ) -> Tuple: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): lowerCAmelCase_ :Optional[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__A , add_prefix_space=__A , trim_offsets=__A ) lowerCAmelCase_ :Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowerCAmelCase_ :Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , __A ) self.assertEqual(post_processor_state["""trim_offsets"""] , __A ) def __lowerCAmelCase ( self ) -> int: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase_ :int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` lowerCAmelCase_ :List[str] = f"""{text_of_1_token} {text_of_1_token}""" lowerCAmelCase_ :Optional[int] = self.rust_tokenizer_class.from_pretrained( __A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A ) lowerCAmelCase_ :Any = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , ) lowerCAmelCase_ :Dict = self.rust_tokenizer_class.from_pretrained( __A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A ) lowerCAmelCase_ :Union[str, Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , ) lowerCAmelCase_ :Dict = self.rust_tokenizer_class.from_pretrained( __A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A ) lowerCAmelCase_ :List[str] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , ) lowerCAmelCase_ :Optional[int] = self.rust_tokenizer_class.from_pretrained( __A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A ) lowerCAmelCase_ :List[Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A ) 
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__A ), len(__A ) + 1 + len(__A )) , ) lowerCAmelCase_ :Dict = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowerCAmelCase_ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained( __A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A ) lowerCAmelCase_ :Union[str, Any] = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) , ) lowerCAmelCase_ :Any = self.rust_tokenizer_class.from_pretrained( __A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A ) lowerCAmelCase_ :str = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , ) lowerCAmelCase_ :List[Any] = self.rust_tokenizer_class.from_pretrained( __A , use_fast=__A , add_prefix_space=__A , trim_offsets=__A ) lowerCAmelCase_ :Tuple = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) , )
"""simple docstring""" import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :str = GPTSanJapaneseTokenizer UpperCAmelCase_ :Optional[int] = False UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False} def __lowerCAmelCase ( self ) -> Tuple: super().setUp() # fmt: off lowerCAmelCase_ :Dict = ["""ใ“ใ‚“""", """ใ“ใ‚“ใซ""", """ใซใกใฏ""", """ใฐใ‚“ใฏ""", """ไธ–็•Œ,ใ”บ็•Œ""", """ใ€""", """ใ€‚""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # ๐Ÿ˜€ lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""} lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(__A ) ) def __lowerCAmelCase ( self , **__A ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> Dict: lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" return input_text, output_text def __lowerCAmelCase ( self , __A ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A ) lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A ) lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A ) return text, ids def __lowerCAmelCase ( self ) -> str: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> int: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚""" lowerCAmelCase_ :Any = ["""ใ“ใ‚“""", """ใซใกใฏ""", """ใ€""", """ไธ–็•Œ""", """ใ€‚""", """<SP>""", """ใ“ใ‚“""", """ใฐใ‚“ใฏ""", """ใ€""", """ใ”บ็•Œ""", """ใ€‚"""] lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids without special tokens lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids with special tokens lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token] lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( 
self ) -> Union[str, Any]: lowerCAmelCase_ :int = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚""" lowerCAmelCase_ :str = """ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :str = tokenizer.encode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Any = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text ) lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A ) lowerCAmelCase_ :int = tokenizer.decode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) lowerCAmelCase_ :Tuple = tokenizer.decode(__A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1) lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0] lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1) lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ„ใƒฏ""" , prefix_text="""ใ‚ใƒณ""" ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertNotEqual(__A , __A ) self.assertNotEqual(__A , __A ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = [["""ๆญฆ็”ฐไฟก็Ž„""", """ใฏใ€"""], ["""็น”็”ฐไฟก้•ท""", """ใฎ้…ไธ‹ใฎใ€"""]] lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A ) lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A ) # fmt: off lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]] lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], 
[1, 1, 1, 0, 0, 0, 0, 0]] lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , __A ) self.assertListEqual(x_token.token_type_ids , __A ) self.assertListEqual(x_token.attention_mask , __A ) self.assertListEqual(x_token_a.input_ids , __A ) self.assertListEqual(x_token_a.token_type_ids , __A ) self.assertListEqual(x_token_a.attention_mask , __A ) def __lowerCAmelCase ( self ) -> Tuple: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def __lowerCAmelCase ( self ) -> str: # tokenizer has no padding token pass
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _snake_case ( lowercase__ : Any ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase_ :Union[str, Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowerCAmelCase_ :Any = 4 lowerCAmelCase_ :List[Any] = 4_8 lowerCAmelCase_ :Optional[Any] = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase_ :Optional[Any] = [6, 6, 6, 6] lowerCAmelCase_ :List[str] = 6_0 lowerCAmelCase_ :Tuple = [6, 6, 6, 6] lowerCAmelCase_ :List[Any] = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase_ :int = 4 lowerCAmelCase_ :Union[str, Any] = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowerCAmelCase_ :List[str] = 1 lowerCAmelCase_ :Tuple = 1 lowerCAmelCase_ :Any = 1_2_6 lowerCAmelCase_ :List[str] = 7 lowerCAmelCase_ :int = 255.0 lowerCAmelCase_ :Any = """""" return config def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: lowerCAmelCase_ :str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: lowerCAmelCase_ :List[str] = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" ) if "layers" in name: lowerCAmelCase_ :List[str] = name.replace("""layers""" , """encoder.stages""" ) if "residual_group.blocks" in name: lowerCAmelCase_ :str = name.replace("""residual_group.blocks""" , """layers""" ) if "attn.proj" in name: lowerCAmelCase_ :Dict = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: lowerCAmelCase_ :Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: lowerCAmelCase_ :Optional[Any] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: lowerCAmelCase_ :Optional[Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: lowerCAmelCase_ :Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCAmelCase_ :Optional[Any] = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: lowerCAmelCase_ :str = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: lowerCAmelCase_ :int = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: lowerCAmelCase_ :List[Any] = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: lowerCAmelCase_ :Tuple = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if "patch_embed.proj" in name: lowerCAmelCase_ :List[str] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" ) if name == "norm.weight": lowerCAmelCase_ :str = """layernorm.weight""" if name == "norm.bias": lowerCAmelCase_ :Dict = """layernorm.bias""" if "conv_first" in name: lowerCAmelCase_ :str = name.replace("""conv_first""" , """first_convolution""" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowerCAmelCase_ :str = name.replace("""conv_last""" , """final_convolution""" ) 
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowerCAmelCase_ :Optional[int] = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" ) if "upsample.0" in name: lowerCAmelCase_ :Union[str, Any] = name.replace("""upsample.0""" , """upsample.convolution_0""" ) if "upsample.2" in name: lowerCAmelCase_ :List[Any] = name.replace("""upsample.2""" , """upsample.convolution_1""" ) lowerCAmelCase_ :Dict = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": lowerCAmelCase_ :int = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" ) lowerCAmelCase_ :Any = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" ) else: pass else: lowerCAmelCase_ :int = """swin2sr.""" + name return name def _snake_case ( lowercase__ : Any , lowercase__ : Optional[int] ) -> List[Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCAmelCase_ :List[str] = orig_state_dict.pop(lowercase__ ) if "qkv" in key: lowerCAmelCase_ :str = key.split(""".""" ) lowerCAmelCase_ :int = int(key_split[1] ) lowerCAmelCase_ :Any = int(key_split[4] ) lowerCAmelCase_ :str = config.embed_dim if "weight" in key: lowerCAmelCase_ :Any = val[:dim, :] lowerCAmelCase_ :List[str] = val[dim : dim * 2, :] lowerCAmelCase_ :Union[str, Any] = val[-dim:, :] else: lowerCAmelCase_ :List[Any] = val[:dim] lowerCAmelCase_ :Union[str, Any] = val[dim : dim * 2] lowerCAmelCase_ :Tuple = val[-dim:] pass else: lowerCAmelCase_ :List[Any] = val return orig_state_dict def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Any: '''simple docstring''' lowerCAmelCase_ :Any = get_config(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = SwinaSRForImageSuperResolution(lowercase__ ) model.eval() lowerCAmelCase_ :Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" ) lowerCAmelCase_ :Tuple = convert_state_dict(lowercase__ , lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = model.load_state_dict(lowercase__ , strict=lowercase__ ) if len(lowercase__ ) > 0: raise ValueError("""Missing keys when converting: {}""".format(lowercase__ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f"""Unexpected key {key} in state_dict""" ) # verify values lowerCAmelCase_ :Optional[int] = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert("""RGB""" ) lowerCAmelCase_ :int = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowerCAmelCase_ :Optional[Any] = 1_2_6 if """Jpeg""" in checkpoint_url else 2_5_6 lowerCAmelCase_ :Optional[int] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowerCAmelCase_ :Tuple = transforms(lowercase__ ).unsqueeze(0 ) if config.num_channels == 1: lowerCAmelCase_ :str = pixel_values[:, 0, :, :].unsqueeze(1 ) lowerCAmelCase_ :int = model(lowercase__ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowerCAmelCase_ :List[str] = torch.Size([1, 3, 5_1_2, 5_1_2] ) lowerCAmelCase_ :Dict = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase_ :List[str] = 
torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) lowerCAmelCase_ :Optional[Any] = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowerCAmelCase_ :List[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) lowerCAmelCase_ :Optional[Any] = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 3, 5_1_2, 5_1_2] ) lowerCAmelCase_ :Optional[int] = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase_ :Dict = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) lowerCAmelCase_ :Any = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}""" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase__ , atol=1E-3 ) print("""Looks ok!""" ) lowerCAmelCase_ :List[Any] = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } lowerCAmelCase_ :List[Any] = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase__ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(lowercase__ ) if push_to_hub: model.push_to_hub(f"""caidas/{model_name}""" ) processor.push_to_hub(f"""caidas/{model_name}""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth', type=str, help='URL of the original Swin2SR checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument('--push_to_hub', action="https://huggingface.co/datasets/infinityofspace/python_codestyles-single-500/viewer/default/store_true", help='Whether to push the converted model to the hub.') __UpperCAmelCase = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
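# A hedged usage sketch for the conversion script above. The script filename here is
# illustrative and not confirmed by this file; the checkpoint URL is taken from the
# url_to_name mapping above, and the dump folder is a hypothetical local path:
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub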
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __UpperCAmelCase = pd.read_csv( 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/' 'position_salaries.csv' ) __UpperCAmelCase = dataset.iloc[:, 1:2].values __UpperCAmelCase = dataset.iloc[:, 2].values __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0) __UpperCAmelCase = PolynomialFeatures(degree=4) __UpperCAmelCase = poly_reg.fit_transform(X) __UpperCAmelCase = LinearRegression() pol_reg.fit(X_poly, y) def _snake_case ( ) -> str: '''simple docstring''' plt.scatter(lowercase__ , lowercase__ , color="""red""" ) plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def _snake_case ( lowercase__ : int ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = {} lowerCAmelCase_ :List[str] = job["""started_at"""] lowerCAmelCase_ :List[str] = job["""completed_at"""] lowerCAmelCase_ :Dict = date_parser.parse(lowercase__ ) lowerCAmelCase_ :Tuple = date_parser.parse(lowercase__ ) lowerCAmelCase_ :Dict = round((end_datetime - start_datetime).total_seconds() / 60.0 ) lowerCAmelCase_ :Tuple = start lowerCAmelCase_ :List[Any] = end lowerCAmelCase_ :Any = duration_in_min return job_info def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Optional[int]=None ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = None if token is not None: lowerCAmelCase_ :int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""} lowerCAmelCase_ :Optional[int] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" lowerCAmelCase_ :Any = requests.get(lowercase__ , headers=lowercase__ ).json() lowerCAmelCase_ :str = {} try: job_time.update({job["""name"""]: extract_time_from_single_job(lowercase__ ) for job in result["""jobs"""]} ) lowerCAmelCase_ :Any = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 ) for i in range(lowercase__ ): lowerCAmelCase_ :List[Any] = requests.get(url + f"""&page={i + 2}""" , headers=lowercase__ ).json() job_time.update({job["""name"""]: extract_time_from_single_job(lowercase__ ) for job in result["""jobs"""]} ) return job_time except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = get_job_time(args.workflow_run_id) __UpperCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F"""{k}: {v["duration"]}""")
"""simple docstring""" from __future__ import annotations __UpperCAmelCase = 1.6021e-19 # units = C def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]: '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif conductivity < 0: raise ValueError("""Conductivity cannot be negative""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative""" ) elif mobility < 0: raise ValueError("""mobility cannot be negative""" ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *__A , **__A ) -> None: warnings.warn( """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DonutImageProcessor instead.""" , __A , ) super().__init__(*__A , **__A )
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *__A , **__A ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , __A , ) super().__init__(*__A , **__A )
"""simple docstring""" from copy import deepcopy class _SCREAMING_SNAKE_CASE : def __init__( self , __A = None , __A = None ) -> None: if arr is None and size is not None: lowerCAmelCase_ :Tuple = size lowerCAmelCase_ :Dict = [0] * size elif arr is not None: self.init(__A ) else: raise ValueError("""Either arr or size must be specified""" ) def __lowerCAmelCase ( self , __A ) -> None: lowerCAmelCase_ :int = len(__A ) lowerCAmelCase_ :List[Any] = deepcopy(__A ) for i in range(1 , self.size ): lowerCAmelCase_ :List[Any] = self.next_(__A ) if j < self.size: self.tree[j] += self.tree[i] def __lowerCAmelCase ( self ) -> list[int]: lowerCAmelCase_ :Tuple = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): lowerCAmelCase_ :Tuple = self.next_(__A ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def __lowerCAmelCase ( __A ) -> int: return index + (index & (-index)) @staticmethod def __lowerCAmelCase ( __A ) -> int: return index - (index & (-index)) def __lowerCAmelCase ( self , __A , __A ) -> None: if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value lowerCAmelCase_ :Optional[Any] = self.next_(__A ) def __lowerCAmelCase ( self , __A , __A ) -> None: self.add(__A , value - self.get(__A ) ) def __lowerCAmelCase ( self , __A ) -> int: if right == 0: return 0 lowerCAmelCase_ :List[Any] = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] lowerCAmelCase_ :List[str] = self.prev(__A ) return result def __lowerCAmelCase ( self , __A , __A ) -> int: return self.prefix(__A ) - self.prefix(__A ) def __lowerCAmelCase ( self , __A ) -> int: return self.query(__A , index + 1 ) def __lowerCAmelCase ( self , __A ) -> int: value -= self.tree[0] if value < 0: return -1 lowerCAmelCase_ :Union[str, Any] = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 lowerCAmelCase_ :str = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame: '''simple docstring''' lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}""" lowerCAmelCase_ :List[str] = { """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", """Accept-Language""": """en-US, en;q=0.5""", } lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text ) # Initialize a Pandas dataframe with the column titles lowerCAmelCase_ :Union[str, Any] = DataFrame( columns=[ """Product Title""", """Product Link""", """Current Price of the product""", """Product Rating""", """MRP of the product""", """Discount""", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( """div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ): try: lowerCAmelCase_ :str = item.ha.text lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.ha.a["""href"""] lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text try: lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text except AttributeError: lowerCAmelCase_ :int = """Not available""" try: lowerCAmelCase_ :str = ( """โ‚น""" + item.find( """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""โ‚น""" )[1] ) except AttributeError: lowerCAmelCase_ :Optional[Any] = """""" try: lowerCAmelCase_ :str = float( ( ( float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) - float(product_price.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) / float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) * 1_0_0 ) except ValueError: lowerCAmelCase_ :Union[str, Any] = float("""nan""" ) except AttributeError: pass lowerCAmelCase_ :Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] lowerCAmelCase_ :List[Any] = """ """ lowerCAmelCase_ :Tuple = """ """ data_frame.index += 1 return data_frame if __name__ == "__main__": __UpperCAmelCase = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
"""simple docstring""" import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :List[str] = FunnelTokenizer UpperCAmelCase_ :List[Any] = FunnelTokenizerFast UpperCAmelCase_ :int = True UpperCAmelCase_ :Optional[int] = True def __lowerCAmelCase ( self ) -> Tuple: super().setUp() lowerCAmelCase_ :int = [ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase_ :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def __lowerCAmelCase ( self , **__A ) -> Optional[Any]: return FunnelTokenizer.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , **__A ) -> int: return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> List[Any]: lowerCAmelCase_ :Optional[int] = """UNwant\u00E9d,running""" lowerCAmelCase_ :Any = """unwanted, running""" return input_text, output_text def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Optional[Any] = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ :int = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(__A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [7, 4, 5, 10, 8, 9] ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[Any] = self.get_tokenizers(do_lower_case=__A ) for tokenizer in tokenizers: lowerCAmelCase_ :Dict = tokenizer("""UNwant\u00E9d,running""" ) lowerCAmelCase_ :Optional[Any] = len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) lowerCAmelCase_ :str = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Any = """laion/clap-htsat-unfused""" lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp() def __lowerCAmelCase ( self , **__A ) -> List[Any]: return RobertaTokenizer.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self , **__A ) -> Tuple: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self ) -> int: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Optional[Any] = self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 ) lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Dict = self.get_feature_extractor() lowerCAmelCase_ :str = self.get_tokenizer() lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) ) lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" ) lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :List[Any] = """This is a test string""" lowerCAmelCase_ :Dict = processor(text=__A ) lowerCAmelCase_ :List[str] = tokenizer(__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :int = self.get_feature_extractor() lowerCAmelCase_ :Tuple = 
self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase_ :Tuple = processor.batch_decode(__A ) lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
"""simple docstring""" import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Union[str, Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def __lowerCAmelCase ( self , __A=0 ) -> int: lowerCAmelCase_ :Tuple = np.random.RandomState(__A ) lowerCAmelCase_ :List[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs() lowerCAmelCase_ :List[Any] = pipe(**__A ).images lowerCAmelCase_ :str = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :List[Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[str] = self.get_dummy_inputs() lowerCAmelCase_ :Optional[int] = pipe(**__A ).images lowerCAmelCase_ :Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images lowerCAmelCase_ :str = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :Any = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[str] = self.get_dummy_inputs() lowerCAmelCase_ :List[Any] = pipe(**__A ).images lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) 
lowerCAmelCase_ :List[str] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Dict = self.get_dummy_inputs() lowerCAmelCase_ :Optional[int] = pipe(**__A ).images lowerCAmelCase_ :Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :int = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) lowerCAmelCase_ :Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images lowerCAmelCase_ :int = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :str = self.get_dummy_inputs() lowerCAmelCase_ :Tuple = 3 * [inputs["""prompt"""]] # forward lowerCAmelCase_ :Tuple = pipe(**__A ) lowerCAmelCase_ :Any = output.images[0, -3:, -3:, -1] lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Any = 3 * [inputs.pop("""prompt""" )] lowerCAmelCase_ :Optional[int] = pipe.tokenizer( __A , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=__A , return_tensors="""np""" , ) lowerCAmelCase_ :Dict = text_inputs["""input_ids"""] lowerCAmelCase_ :str = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] lowerCAmelCase_ :Optional[int] = prompt_embeds # forward lowerCAmelCase_ :Union[str, Any] = pipe(**__A ) lowerCAmelCase_ :List[str] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Union[str, Any] = 3 * ["""this is a negative prompt"""] lowerCAmelCase_ :List[Any] = negative_prompt lowerCAmelCase_ :Optional[Any] = 3 * [inputs["""prompt"""]] # forward lowerCAmelCase_ :str = pipe(**__A ) lowerCAmelCase_ :Any = output.images[0, -3:, -3:, -1] lowerCAmelCase_ :List[Any] = self.get_dummy_inputs() lowerCAmelCase_ :Any = 3 * [inputs.pop("""prompt""" )] lowerCAmelCase_ :Optional[Any] = [] for p in [prompt, negative_prompt]: lowerCAmelCase_ :Union[str, Any] = pipe.tokenizer( __A , 
padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=__A , return_tensors="""np""" , ) lowerCAmelCase_ :List[Any] = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = embeds # forward lowerCAmelCase_ :Union[str, Any] = pipe(**__A ) lowerCAmelCase_ :Union[str, Any] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[Any] = ort.SessionOptions() lowerCAmelCase_ :Optional[Any] = False return options def __lowerCAmelCase ( self ) -> Dict: # using the PNDM scheduler by default lowerCAmelCase_ :Dict = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Dict = """A painting of a squirrel eating a burger""" np.random.seed(0 ) lowerCAmelCase_ :Any = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" ) lowerCAmelCase_ :List[Any] = output.images lowerCAmelCase_ :Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :List[str] = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :int = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) lowerCAmelCase_ :List[Any] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = """open neural network exchange""" lowerCAmelCase_ :str = np.random.RandomState(0 ) lowerCAmelCase_ :int = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" ) lowerCAmelCase_ :Optional[int] = output.images lowerCAmelCase_ :Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Tuple = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[Any] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) lowerCAmelCase_ :int = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Dict = """open neural network exchange""" lowerCAmelCase_ :str = np.random.RandomState(0 ) lowerCAmelCase_ :str = 
sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" ) lowerCAmelCase_ :Dict = output.images lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ :Optional[int] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :List[Any] = 0 def test_callback_fn(__A , __A , __A ) -> None: lowerCAmelCase_ :Dict = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) lowerCAmelCase_ :Union[str, Any] = latents[0, -3:, -3:, -1] lowerCAmelCase_ :List[str] = np.array( [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) lowerCAmelCase_ :Union[str, Any] = latents[0, -3:, -3:, -1] lowerCAmelCase_ :Dict = np.array( [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 lowerCAmelCase_ :List[str] = False lowerCAmelCase_ :List[str] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[Any] = """Andromeda galaxy in a bottle""" lowerCAmelCase_ :Dict = np.random.RandomState(0 ) pipe( prompt=__A , num_inference_steps=5 , guidance_scale=7.5 , generator=__A , callback=__A , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(__A , __A ) assert pipe.safety_checker is None lowerCAmelCase_ :str = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__A ) lowerCAmelCase_ :int = OnnxStableDiffusionPipeline.from_pretrained(__A ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowerCAmelCase_ :Optional[int] = pipe("""example prompt""" , num_inference_steps=2 ).images[0] assert image is not None
"""simple docstring""" import os from math import logaa def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int: '''simple docstring''' lowerCAmelCase_ :float = 0 lowerCAmelCase_ :Union[str, Any] = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ): lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) ) if x * logaa(lowercase__ ) > largest: lowerCAmelCase_ :Any = x * logaa(lowercase__ ) lowerCAmelCase_ :List[Any] = i + 1 return result if __name__ == "__main__": print(solution())
"""simple docstring""" import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class _SCREAMING_SNAKE_CASE ( A__ ): def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :int = tempfile.mkdtemp() lowerCAmelCase_ :str = 5 # Realm tok lowerCAmelCase_ :Tuple = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """test""", """question""", """this""", """is""", """the""", """first""", """second""", """third""", """fourth""", """fifth""", """record""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase_ :List[str] = os.path.join(self.tmpdirname , """realm_tokenizer""" ) os.makedirs(__A , exist_ok=__A ) lowerCAmelCase_ :Optional[Any] = os.path.join(__A , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowerCAmelCase_ :str = os.path.join(self.tmpdirname , """realm_block_records""" ) os.makedirs(__A , exist_ok=__A ) def __lowerCAmelCase ( self ) -> RealmTokenizer: return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) ) def __lowerCAmelCase ( self ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = RealmConfig(num_block_records=self.num_block_records ) return config def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :str = Dataset.from_dict( { """id""": ["""0""", """1"""], """question""": ["""foo""", """bar"""], """answers""": [["""Foo""", """Bar"""], ["""Bar"""]], } ) return dataset def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :List[Any] = np.array( [ b"""This is the first record""", b"""This is the second record""", b"""This is the third record""", b"""This is the fourth record""", b"""This is the fifth record""", b"""This is a longer longer longer record""", ] , dtype=__A , ) return block_records def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Union[str, Any] = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Optional[int] = self.get_config() lowerCAmelCase_ :List[Any] = self.get_dummy_retriever() lowerCAmelCase_ :int = retriever.tokenizer lowerCAmelCase_ :List[str] = np.array([0, 3] , dtype="""long""" ) lowerCAmelCase_ :Union[str, Any] = tokenizer(["""Test question"""] ).input_ids lowerCAmelCase_ :Optional[Any] = tokenizer( ["""the fourth"""] , add_special_tokens=__A , return_token_type_ids=__A , return_attention_mask=__A , ).input_ids lowerCAmelCase_ :Any = config.reader_seq_len lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = retriever( __A , __A , answer_ids=__A , max_length=__A , return_tensors="""np""" ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) 
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :int = self.get_config() lowerCAmelCase_ :List[str] = self.get_dummy_retriever() lowerCAmelCase_ :Union[str, Any] = retriever.tokenizer lowerCAmelCase_ :Union[str, Any] = np.array([0, 3, 5] , dtype="""long""" ) lowerCAmelCase_ :Any = tokenizer(["""Test question"""] ).input_ids lowerCAmelCase_ :Optional[int] = tokenizer( ["""the fourth""", """longer longer"""] , add_special_tokens=__A , return_token_type_ids=__A , return_attention_mask=__A , ).input_ids lowerCAmelCase_ :str = config.reader_seq_len lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Any = retriever( __A , __A , answer_ids=__A , max_length=__A , return_tensors="""np""" ) self.assertEqual([False, True, True] , __A ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __A ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :List[Any] = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) # Test local path lowerCAmelCase_ :Any = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" ) # Test mocked remote path with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download: lowerCAmelCase_ :Optional[Any] = os.path.join( os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME ) lowerCAmelCase_ :int = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
"""simple docstring""" import itertools import math def _snake_case ( lowercase__ : int ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = 2 while True: if is_prime(lowercase__ ): yield num num += 1 def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> int: '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , lowercase__ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
1
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } __UpperCAmelCase = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def _snake_case ( lowercase__ : int , lowercase__ : Dict , lowercase__ : Any , lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Any ) -> str: '''simple docstring''' for attribute in key.split(""".""" ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowerCAmelCase_ :Any = """lm_head""" lowerCAmelCase_ :Optional[int] = getattr(lowercase__ , lowercase__ ) if weight_type is not None: lowerCAmelCase_ :List[Any] = getattr(lowercase__ , lowercase__ ).shape else: lowerCAmelCase_ :List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCAmelCase_ :List[str] = value elif weight_type == "weight_g": lowerCAmelCase_ :Any = value elif weight_type == "weight_v": lowerCAmelCase_ :List[str] = value elif weight_type == "bias": lowerCAmelCase_ :Dict = value else: lowerCAmelCase_ :Optional[Any] = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = [] lowerCAmelCase_ :Union[str, Any] = fairseq_model.state_dict() lowerCAmelCase_ :Any = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowerCAmelCase_ :str = False if "conv_layers" in name: load_conv_layer( lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == """group""" , ) lowerCAmelCase_ :Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): lowerCAmelCase_ :Optional[Any] = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowerCAmelCase_ :Optional[int] = True if "*" in mapped_key: lowerCAmelCase_ :Any = name.split(lowercase__ )[0].split(""".""" )[-2] lowerCAmelCase_ :Optional[Any] = mapped_key.replace("""*""" , lowercase__ ) if "weight_g" in name: lowerCAmelCase_ :str = """weight_g""" elif "weight_v" in name: lowerCAmelCase_ :Any = """weight_v""" elif "bias" in name: lowerCAmelCase_ :List[Any] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCAmelCase_ :str = """weight""" else: lowerCAmelCase_ :str = None set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) continue if not is_used: unused_weights.append(lowercase__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _snake_case ( lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = full_name.split("""conv_layers.""" )[-1] lowerCAmelCase_ :Union[str, Any] = name.split(""".""" ) lowerCAmelCase_ :Tuple = int(items[0] ) lowerCAmelCase_ :List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowerCAmelCase_ :str = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowerCAmelCase_ :Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowerCAmelCase_ :List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowerCAmelCase_ :List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase__ ) @torch.no_grad() def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Tuple=None , lowercase__ : int=None , lowercase__ : Optional[Any]=True ) -> Any: '''simple docstring''' if config_path is not None: lowerCAmelCase_ :List[Any] = UniSpeechConfig.from_pretrained(lowercase__ ) else: lowerCAmelCase_ :str = UniSpeechConfig() if is_finetuned: if dict_path: lowerCAmelCase_ :Union[str, Any] = Dictionary.load_from_json(lowercase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCAmelCase_ :Any = target_dict.pad_index lowerCAmelCase_ :Union[str, Any] = target_dict.bos_index lowerCAmelCase_ :Optional[Any] = target_dict.eos_index lowerCAmelCase_ :Dict = len(target_dict.symbols ) lowerCAmelCase_ :Optional[int] = os.path.join(lowercase__ , """vocab.json""" ) if not os.path.isdir(lowercase__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase__ ) ) return os.makedirs(lowercase__ , exist_ok=lowercase__ ) lowerCAmelCase_ :List[Any] = target_dict.indices # fairseq has the <pad> and <s> switched lowerCAmelCase_ :List[str] = 4_2 lowerCAmelCase_ :List[str] = 4_3 with open(lowercase__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowercase__ , lowercase__ ) lowerCAmelCase_ :int = WavaVecaPhonemeCTCTokenizer( lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase__ , ) lowerCAmelCase_ :List[Any] = True if config.feat_extract_norm == """layer""" else False lowerCAmelCase_ :Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) lowerCAmelCase_ :Dict = WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ ) processor.save_pretrained(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = UniSpeechForCTC(lowercase__ ) else: lowerCAmelCase_ :List[str] = UniSpeechForPreTraining(lowercase__ ) if is_finetuned: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} ) else: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowerCAmelCase_ :Optional[int] = model[0].eval() recursively_load_weights(lowercase__ , lowercase__ , lowercase__ ) hf_unispeech.save_pretrained(lowercase__ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', 
default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action="https://huggingface.co/datasets/infinityofspace/python_codestyles-single-500/viewer/default/store_true", help='Whether the model to convert is a fine-tuned model or not' ) __UpperCAmelCase = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
1
"""simple docstring""" def _snake_case ( lowercase__ : int = 5_0 ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"""{solution() = }""")
1
1
"""simple docstring""" import math def _snake_case ( ) -> None: '''simple docstring''' lowerCAmelCase_ :Tuple = input("""Enter message: """ ) lowerCAmelCase_ :Optional[Any] = int(input(f"""Enter key [2-{len(lowercase__ ) - 1}]: """ ) ) lowerCAmelCase_ :Tuple = input("""Encryption/Decryption [e/d]: """ ) if mode.lower().startswith("""e""" ): lowerCAmelCase_ :List[Any] = encrypt_message(lowercase__ , lowercase__ ) elif mode.lower().startswith("""d""" ): lowerCAmelCase_ :Tuple = decrypt_message(lowercase__ , lowercase__ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f"""Output:\n{text + "|"}""" ) def _snake_case ( lowercase__ : int , lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Optional[int] = [""""""] * key for col in range(lowercase__ ): lowerCAmelCase_ :str = col while pointer < len(lowercase__ ): cipher_text[col] += message[pointer] pointer += key return "".join(lowercase__ ) def _snake_case ( lowercase__ : int , lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Dict = math.ceil(len(lowercase__ ) / key ) lowerCAmelCase_ :int = key lowerCAmelCase_ :List[str] = (num_cols * num_rows) - len(lowercase__ ) lowerCAmelCase_ :Dict = [""""""] * num_cols lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Tuple = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): lowerCAmelCase_ :Any = 0 row += 1 return "".join(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
1
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCAmelCase_ :List[Any] = CLIPTextModel(__A ) lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Union[str, Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Tuple = torch.manual_seed(__A ) else: lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A ) 
lowerCAmelCase_ :List[Any] = 2 lowerCAmelCase_ :int = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ) lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> int: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__A ): if isinstance(__A , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
lowerCAmelCase_ :str = CLIPTextModel(__A ) lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase_ :List[Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> str: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A ) else: lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A ) lowerCAmelCase_ :Optional[Any] = 2 lowerCAmelCase_ :Optional[int] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), ] lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[str] = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) lowerCAmelCase_ :Union[str, Any] = 1_0.0 lowerCAmelCase_ :Union[str, Any] = 4 lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A ) lowerCAmelCase_ :List[str] = steps lowerCAmelCase_ :int = scale lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0] lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = steps lowerCAmelCase_ :str = scale lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = steps lowerCAmelCase_ :Union[str, Any] = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = steps lowerCAmelCase_ :Tuple = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def __lowerCAmelCase ( self ) -> Dict: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> Optional[int]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :str = self.get_dummy_components() 
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__A ) except NotImplementedError: pass @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase_ :List[Any] = """evil space-punk bird""" lowerCAmelCase_ :List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) lowerCAmelCase_ :int = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) lowerCAmelCase_ :Union[str, Any] = pipe( __A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) lowerCAmelCase_ :Tuple = output.images[0] assert image.shape == (512, 512, 3) lowerCAmelCase_ :Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
1
1
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , __A = 16 , __A = 88 , __A = None , __A = 1 , __A = 0.0 , __A = 32 , __A = None , __A = False , __A = None , __A = None , __A = "geglu" , __A = None , ) -> Tuple: super().__init__() lowerCAmelCase_ :Optional[Any] = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__A , attention_head_dim=__A , in_channels=__A , num_layers=__A , dropout=__A , norm_num_groups=__A , cross_attention_dim=__A , attention_bias=__A , sample_size=__A , num_vector_embeds=__A , activation_fn=__A , num_embeds_ada_norm=__A , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowerCAmelCase_ :List[str] = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowerCAmelCase_ :Optional[Any] = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowerCAmelCase_ :Tuple = [1, 0] def __lowerCAmelCase ( self , __A , __A , __A=None , __A=None , __A=None , __A = True , ) -> int: lowerCAmelCase_ :Tuple = hidden_states lowerCAmelCase_ :Optional[Any] = [] lowerCAmelCase_ :Optional[int] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowerCAmelCase_ :Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowerCAmelCase_ :Dict = self.transformer_index_for_condition[i] lowerCAmelCase_ :Tuple = self.transformers[transformer_index]( __A , encoder_hidden_states=__A , timestep=__A , cross_attention_kwargs=__A , return_dict=__A , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowerCAmelCase_ :Optional[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowerCAmelCase_ :Dict = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__A )
1
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ): UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :List[str] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" f""" `n_embd`: {n_embd} are not equal.""" ) lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim lowerCAmelCase_ :str = prefix_hidden_dim lowerCAmelCase_ :str = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :List[Any] = ( nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :Any = GPTaConfig( vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , ) lowerCAmelCase_ :Any = GPTaLMHeadModel(__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]: lowerCAmelCase_ :str = self.transformer.transformer.wte(__A ) lowerCAmelCase_ :Any = self.encode_prefix(__A ) lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A ) lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor: return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: return self.encode_prefix(__A ) @torch.no_grad() def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]: lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 ) lowerCAmelCase_ :Optional[int] = [] lowerCAmelCase_ :List[str] = [] for feature in features: lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature # Only support beam search for now lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam( input_embeds=__A , device=__A , eos_token_id=__A ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) lowerCAmelCase_ :Tuple = torch.stack(__A ) lowerCAmelCase_ :int = torch.stack(__A ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, 
Any]: lowerCAmelCase_ :Optional[int] = eos_token_id lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int ) lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool ) if input_embeds is not None: lowerCAmelCase_ :List[str] = input_embeds else: lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A ) for i in range(__A ): lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A ) lowerCAmelCase_ :str = outputs.logits lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) lowerCAmelCase_ :Dict = logits.softmax(-1 ).log() if scores is None: lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 ) lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: lowerCAmelCase_ :List[str] = next_tokens else: lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] ) lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 ) else: lowerCAmelCase_ :List[Any] = -float(np.inf ) lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Optional[int] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None] lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 ) lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1] lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source] lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1] lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 ) lowerCAmelCase_ :str = tokens[next_tokens_source] lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 ) lowerCAmelCase_ :Dict = generated[next_tokens_source] lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source] lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 ) lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze() if is_stopped.all(): break lowerCAmelCase_ :str = scores / seq_lengths lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A ) # tokens tensors are already padded to max_seq_length lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order] lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 ) lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
1
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase = { 'configuration_mobilenet_v2': [ 'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileNetV2Config', 'MobileNetV2OnnxConfig', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['MobileNetV2FeatureExtractor'] __UpperCAmelCase = ['MobileNetV2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileNetV2ForImageClassification', 'MobileNetV2ForSemanticSegmentation', 'MobileNetV2Model', 'MobileNetV2PreTrainedModel', 'load_tf_weights_in_mobilenet_v2', ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "detr" UpperCAmelCase_ :str = ["past_key_values"] UpperCAmelCase_ :Tuple = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(__A , __A ): lowerCAmelCase_ :str = backbone_config.get("""model_type""" ) lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A ) # set timm attributes to None lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None lowerCAmelCase_ :Tuple = use_timm_backbone lowerCAmelCase_ :Optional[int] = backbone_config lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :int = num_queries lowerCAmelCase_ :List[Any] = d_model lowerCAmelCase_ :Optional[int] = encoder_ffn_dim lowerCAmelCase_ :Tuple = encoder_layers lowerCAmelCase_ :int = encoder_attention_heads lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim lowerCAmelCase_ :List[str] = decoder_layers lowerCAmelCase_ :Dict = decoder_attention_heads lowerCAmelCase_ :Dict = dropout lowerCAmelCase_ :Tuple = attention_dropout lowerCAmelCase_ :Union[str, Any] = activation_dropout lowerCAmelCase_ :Any = activation_function lowerCAmelCase_ :List[str] = init_std lowerCAmelCase_ :Optional[int] = init_xavier_std lowerCAmelCase_ :int = encoder_layerdrop lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop lowerCAmelCase_ :List[str] = encoder_layers lowerCAmelCase_ :Union[str, Any] = auxiliary_loss lowerCAmelCase_ :str = position_embedding_type lowerCAmelCase_ :List[Any] = backbone lowerCAmelCase_ :str = use_pretrained_backbone lowerCAmelCase_ :str = dilation # Hungarian matcher lowerCAmelCase_ :List[Any] = class_cost lowerCAmelCase_ :Union[str, Any] = bbox_cost lowerCAmelCase_ :Tuple = giou_cost # Loss coefficients lowerCAmelCase_ :Optional[int] = mask_loss_coefficient lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient lowerCAmelCase_ :Tuple = bbox_loss_coefficient lowerCAmelCase_ :Tuple = giou_loss_coefficient lowerCAmelCase_ :Dict = eos_coefficient super().__init__(is_encoder_decoder=__A , **__A ) @property def __lowerCAmelCase ( self ) -> int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) -> int: return self.d_model @classmethod 
def __lowerCAmelCase ( cls , __A , **__A ) -> Any: return cls(backbone_config=__A , **__A ) def __lowerCAmelCase ( self ) -> Dict[str, any]: lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowerCAmelCase_ :Dict = self.backbone_config.to_dict() lowerCAmelCase_ :str = self.__class__.model_type return output class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[Any] = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-5 @property def __lowerCAmelCase ( self ) -> int: return 12
1
1
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer __UpperCAmelCase = ['gpt2'] __UpperCAmelCase = 'gpt2' if is_tf_available(): class _SCREAMING_SNAKE_CASE ( tf.Module ): def __init__( self , __A ) -> str: super().__init__() lowerCAmelCase_ :Dict = tokenizer lowerCAmelCase_ :List[Any] = AutoConfig.from_pretrained(__A ) lowerCAmelCase_ :int = TFGPTaLMHeadModel.from_config(__A ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) ) def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = self.tokenizer(__A ) lowerCAmelCase_ :Optional[Any] = tokenized["""input_ids"""].to_tensor() lowerCAmelCase_ :Union[str, Any] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) lowerCAmelCase_ :Dict = self.model(input_ids=__A , attention_mask=__A )["""logits"""] return outputs @require_tf @require_keras_nlp class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: super().setUp() lowerCAmelCase_ :List[str] = [GPTaTokenizer.from_pretrained(__A ) for checkpoint in (TOKENIZER_CHECKPOINTS)] lowerCAmelCase_ :Tuple = [TFGPTaTokenizer.from_pretrained(__A ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowerCAmelCase_ :Optional[int] = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: ไธ€ ไบŒ ไธ‰ ไธ€ไบŒไธ‰""", """And some much more rare Chinese: ้ฝ‰ ๅ ƒ ้ฝ‰ๅ ƒ""", """Je vais aussi รฉcrire en franรงais pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaฤ‹, ๊ผ""", ] lowerCAmelCase_ :Union[str, Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def __lowerCAmelCase ( self ) -> Any: for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: lowerCAmelCase_ :Tuple = tokenizer([test_inputs] , return_tensors="""tf""" ) lowerCAmelCase_ :List[Any] = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors lowerCAmelCase_ :Union[str, Any] = python_outputs[key].numpy() lowerCAmelCase_ :Tuple = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(__A , tf.intaa ) == tf_outputs_values ) ) @slow def __lowerCAmelCase ( self ) -> str: for tf_tokenizer in self.tf_tokenizers: lowerCAmelCase_ :str = tf.function(__A ) for test_inputs in self.test_sentences: lowerCAmelCase_ :List[str] = tf.constant(__A ) lowerCAmelCase_ :str = compiled_tokenizer(__A ) lowerCAmelCase_ :Optional[Any] = tf_tokenizer(__A ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def __lowerCAmelCase ( self ) -> Optional[int]: for tf_tokenizer in self.tf_tokenizers: lowerCAmelCase_ :Optional[Any] = ModelToSave(tokenizer=__A ) lowerCAmelCase_ :Optional[int] = 
tf.convert_to_tensor([self.test_sentences[0]] ) lowerCAmelCase_ :Dict = model.serving(__A ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowerCAmelCase_ :Optional[int] = Path(__A ) / """saved.model""" tf.saved_model.save(__A , __A , signatures={"""serving_default""": model.serving} ) lowerCAmelCase_ :Dict = tf.saved_model.load(__A ) lowerCAmelCase_ :int = loaded_model.signatures["""serving_default"""](__A )["""output_0"""] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def __lowerCAmelCase ( self ) -> Dict: for tf_tokenizer in self.tf_tokenizers: lowerCAmelCase_ :Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] ) lowerCAmelCase_ :Optional[Any] = tf_tokenizer(__A ) # Build model with some sample inputs lowerCAmelCase_ :Any = tf_tokenizer.get_config() lowerCAmelCase_ :str = TFGPTaTokenizer.from_config(__A ) lowerCAmelCase_ :Optional[Any] = model_from_config(__A ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def __lowerCAmelCase ( self ) -> Any: for tf_tokenizer in self.tf_tokenizers: # for the test to run lowerCAmelCase_ :Dict = 12_3123 for max_length in [3, 5, 1024]: lowerCAmelCase_ :List[Any] = tf.convert_to_tensor([self.test_sentences[0]] ) lowerCAmelCase_ :Tuple = tf_tokenizer(__A , max_length=__A ) lowerCAmelCase_ :Optional[int] = out["""input_ids"""].numpy().shape[1] assert out_length == max_length
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['DeiTFeatureExtractor'] __UpperCAmelCase = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
1
"""simple docstring""" from __future__ import annotations from collections import deque class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Any: lowerCAmelCase_ :list[dict] = [] self.adlist.append( {"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} ) for keyword in keywords: self.add_keyword(__A ) self.set_fail_transitions() def __lowerCAmelCase ( self , __A , __A ) -> int | None: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def __lowerCAmelCase ( self , __A ) -> None: lowerCAmelCase_ :int = 0 for character in keyword: lowerCAmelCase_ :Optional[int] = self.find_next_state(__A , __A ) if next_state is None: self.adlist.append( { """value""": character, """next_states""": [], """fail_state""": 0, """output""": [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) lowerCAmelCase_ :int = len(self.adlist ) - 1 else: lowerCAmelCase_ :str = next_state self.adlist[current_state]["output"].append(__A ) def __lowerCAmelCase ( self ) -> None: lowerCAmelCase_ :deque = deque() for node in self.adlist[0]["next_states"]: q.append(__A ) lowerCAmelCase_ :Optional[int] = 0 while q: lowerCAmelCase_ :Union[str, Any] = q.popleft() for child in self.adlist[r]["next_states"]: q.append(__A ) lowerCAmelCase_ :List[Any] = self.adlist[r]["""fail_state"""] while ( self.find_next_state(__A , self.adlist[child]["""value"""] ) is None and state != 0 ): lowerCAmelCase_ :Tuple = self.adlist[state]["""fail_state"""] lowerCAmelCase_ :Optional[Any] = self.find_next_state( __A , self.adlist[child]["""value"""] ) if self.adlist[child]["fail_state"] is None: lowerCAmelCase_ :List[Any] = 0 lowerCAmelCase_ :Any = ( self.adlist[child]["""output"""] + self.adlist[self.adlist[child]["""fail_state"""]]["""output"""] ) def __lowerCAmelCase ( self , __A ) -> dict[str, list[int]]: lowerCAmelCase_ :dict = {} # returns a dict with keywords and list of its occurrences lowerCAmelCase_ :Union[str, Any] = 0 for i in range(len(__A ) ): while ( self.find_next_state(__A , string[i] ) is None and current_state != 0 ): lowerCAmelCase_ :str = self.adlist[current_state]["""fail_state"""] lowerCAmelCase_ :List[str] = self.find_next_state(__A , string[i] ) if next_state is None: lowerCAmelCase_ :Optional[int] = 0 else: lowerCAmelCase_ :Optional[Any] = next_state for key in self.adlist[current_state]["output"]: if key not in result: lowerCAmelCase_ :int = [] result[key].append(i - len(__A ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
1
1
"""simple docstring""" def _snake_case ( lowercase__ : list ) -> bool: '''simple docstring''' if not isinstance(lowercase__ , lowercase__ ): raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" ) if len(lowercase__ ) == 0: raise ValueError("""Input list must be a non empty list""" ) if len(lowercase__ ) == 1: return True lowerCAmelCase_ :List[Any] = series[1] - series[0] for index in range(len(lowercase__ ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def _snake_case ( lowercase__ : list ) -> float: '''simple docstring''' if not isinstance(lowercase__ , lowercase__ ): raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" ) if len(lowercase__ ) == 0: raise ValueError("""Input list must be a non empty list""" ) lowerCAmelCase_ :Dict = 0 for val in series: answer += val return answer / len(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
1
"""simple docstring""" __UpperCAmelCase = 2_56 # Modulus to hash a string __UpperCAmelCase = 1_00_00_03 def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool: '''simple docstring''' lowerCAmelCase_ :Tuple = len(lowercase__ ) lowerCAmelCase_ :List[str] = len(lowercase__ ) if p_len > t_len: return False lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :Optional[int] = 0 lowerCAmelCase_ :Any = 1 # Calculating the hash of pattern and substring of text for i in range(lowercase__ ): lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash lowerCAmelCase_ :Any = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def _snake_case ( ) -> None: '''simple docstring''' lowerCAmelCase_ :int = """abc1abc12""" lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc""" lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc""" assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ ) # Test 2) lowerCAmelCase_ :Dict = """ABABX""" lowerCAmelCase_ :int = """ABABZABABYABABX""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 3) lowerCAmelCase_ :Union[str, Any] = """AAAB""" lowerCAmelCase_ :List[str] = """ABAAAAAB""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 4) lowerCAmelCase_ :Dict = """abcdabcy""" lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 5) lowerCAmelCase_ :Optional[int] = """Lรผ""" lowerCAmelCase_ :Optional[int] = """Lรผsai""" assert rabin_karp(lowercase__ , lowercase__ ) lowerCAmelCase_ :Optional[int] = """Lue""" assert not rabin_karp(lowercase__ , lowercase__ ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
1
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=12 , __A=7 , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=0 , __A=None , ) -> Tuple: lowerCAmelCase_ :Any = parent lowerCAmelCase_ :Any = batch_size lowerCAmelCase_ :str = seq_length lowerCAmelCase_ :Any = is_training lowerCAmelCase_ :List[str] = use_input_mask lowerCAmelCase_ :str = use_labels lowerCAmelCase_ :str = vocab_size lowerCAmelCase_ :List[Any] = hidden_size lowerCAmelCase_ :Any = projection_dim lowerCAmelCase_ :Optional[Any] = num_hidden_layers lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :Optional[int] = intermediate_size lowerCAmelCase_ :Tuple = dropout lowerCAmelCase_ :List[Any] = attention_dropout lowerCAmelCase_ :List[Any] = max_position_embeddings lowerCAmelCase_ :Union[str, Any] = initializer_range lowerCAmelCase_ :Optional[Any] = scope lowerCAmelCase_ :List[str] = bos_token_id def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ :Optional[Any] = None if self.use_input_mask: lowerCAmelCase_ :Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: lowerCAmelCase_ :Optional[int] = input_mask.numpy() lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = input_mask.shape lowerCAmelCase_ :List[str] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): lowerCAmelCase_ :Dict = 1 lowerCAmelCase_ :Dict = 0 lowerCAmelCase_ :Optional[Any] = self.get_config() return config, input_ids, tf.convert_to_tensor(__A ) def __lowerCAmelCase ( self ) -> Tuple: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def __lowerCAmelCase ( self , __A , __A , __A ) -> Any: lowerCAmelCase_ :str = TFBlipTextModel(config=__A ) lowerCAmelCase_ :str = model(__A , attention_mask=__A , training=__A ) lowerCAmelCase_ :int = model(__A , training=__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :List[Any] = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :int = config_and_inputs lowerCAmelCase_ :List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class _SCREAMING_SNAKE_CASE ( A__ , 
unittest.TestCase ): UpperCAmelCase_ :Any = (TFBlipTextModel,) if is_tf_available() else () UpperCAmelCase_ :Any = False UpperCAmelCase_ :List[Any] = False UpperCAmelCase_ :Optional[Any] = False def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :int = BlipTextModelTester(self ) lowerCAmelCase_ :Any = ConfigTester(self , config_class=__A , hidden_size=37 ) def __lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __lowerCAmelCase ( self ) -> int: pass def __lowerCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def __lowerCAmelCase ( self ) -> Any: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def __lowerCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def __lowerCAmelCase ( self ) -> Any: pass @slow def __lowerCAmelCase ( self ) -> List[str]: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ :Dict = TFBlipTextModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __lowerCAmelCase ( self , __A=True ) -> Tuple: super().test_pt_tf_model_equivalence(allow_missing_keys=__A )
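# These tests are normally collected by pytest rather than executed directly; a
# hedged invocation sketch (the file path is an assumption based on the usual
# transformers test layout):
#
#   python -m pytest tests/models/blip/test_modeling_tf_blip_text.py -k TFBlipText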
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str: '''simple docstring''' lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : int ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase_ :Optional[Any] = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase_ :List[Any] = 1_6 elif accelerator.mixed_precision != "no": lowerCAmelCase_ :List[str] = 8 else: lowerCAmelCase_ :Optional[int] = None return tokenizer.pad( lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCAmelCase_ :Optional[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :List[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __UpperCAmelCase = mocked_dataloaders # noqa: F811 def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1": lowerCAmelCase_ :Optional[Any] = 2 # New Code # lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps ) lowerCAmelCase_ :int = int(args.local_sgd_steps ) # Initialize accelerator lowerCAmelCase_ :str = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :int = config["""lr"""] lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :int = int(config["""seed"""] ) lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ ) # Instantiate scheduler lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Now we train the model for epoch in range(lowercase__ ): model.train() with LocalSGD( accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(lowercase__ ): lowerCAmelCase_ :str = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = output.loss accelerator.backward(lowercase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) def _snake_case ( ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument( """--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowerCAmelCase_ :Optional[Any] = parser.parse_args() lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
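# A hedged sketch of launching this script with Accelerate; the script name and
# flag values below are illustrative, not taken from the source:
#
#   accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8 --mixed_precision fp16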
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'spiece.model'} __UpperCAmelCase = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } __UpperCAmelCase = { 'AI-Sweden/gpt-sw3-126m': 20_48, 'AI-Sweden/gpt-sw3-350m': 20_48, 'AI-Sweden/gpt-sw3-1.6b': 20_48, 'AI-Sweden/gpt-sw3-6.7b': 20_48, 'AI-Sweden/gpt-sw3-20b': 20_48, } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = VOCAB_FILES_NAMES UpperCAmelCase_ :List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ :str = ["input_ids", "attention_mask"] def __init__( self , __A , __A=False , __A=False , __A=False , __A=None , __A=None , __A=None , __A=None , __A = None , **__A , ) -> None: lowerCAmelCase_ :Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase_ :Any = kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) lowerCAmelCase_ :str = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing lowerCAmelCase_ :Dict = """<|endoftext|>""" if eos_token is None else eos_token lowerCAmelCase_ :Any = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: lowerCAmelCase_ :Dict = unk_token if pad_token is None else pad_token lowerCAmelCase_ :List[Any] = eos_token if bos_token is None else bos_token else: lowerCAmelCase_ :Any = """<pad>""" if pad_token is None else pad_token lowerCAmelCase_ :Any = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) lowerCAmelCase_ :List[Any] = do_lower_case lowerCAmelCase_ :Dict = remove_space lowerCAmelCase_ :Optional[int] = keep_accents lowerCAmelCase_ :int = vocab_file lowerCAmelCase_ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__A ) # Used for whitespace normalization in input texts # fmt : off lowerCAmelCase_ :List[str] = {""" """, """โ€‰""", """โ€Š""", """โ€ฏ""", """โ€…""", """ใ€€""", """โ€‚""", """ """, """โ€ˆ""", """โ€ƒ""", """๏ฟผ""", """ย„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing lowerCAmelCase_ :int = re.compile( f"""[{"".join(map(__A , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) -> Optional[Any]: lowerCAmelCase_ :Dict = self.__dict__.copy() lowerCAmelCase_ :str = None return state def __setstate__( self , __A ) -> List[Any]: lowerCAmelCase_ :int = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase_ :Optional[Any] = {} lowerCAmelCase_ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def __lowerCAmelCase ( self ) -> int: return len(self.sp_model ) def __lowerCAmelCase ( self , __A ) -> str: lowerCAmelCase_ :Any = self.non_printing_characters_re.sub("""""" , __A ) # Normalize whitespaces lowerCAmelCase_ :str = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization lowerCAmelCase_ :Union[str, Any] = unicodedata.normalize("""NFC""" , __A ) return text def __lowerCAmelCase ( self , __A , **__A ) -> List[str]: lowerCAmelCase_ :Dict = self.preprocess_text(__A ) return self.sp_model.encode(__A , out_type=__A ) def __lowerCAmelCase ( self , __A ) -> int: return self.sp_model.PieceToId(__A ) def __lowerCAmelCase ( self , __A ) -> str: return self.sp_model.IdToPiece(__A ) @staticmethod def __lowerCAmelCase ( __A ) -> str: return out_string def __lowerCAmelCase ( self , __A ) -> str: lowerCAmelCase_ :int = [] lowerCAmelCase_ :int = """""" lowerCAmelCase_ :List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__A ) + token lowerCAmelCase_ :Tuple = True lowerCAmelCase_ :Optional[int] = [] else: current_sub_tokens.append(__A ) lowerCAmelCase_ :str = False out_string += self.sp_model.decode(__A ) return out_string def __lowerCAmelCase ( self ) -> Dict[str, int]: lowerCAmelCase_ :int = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase_ :Optional[int] = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , """wb""" ) as fi: lowerCAmelCase_ :str = self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,) def __lowerCAmelCase ( self , __A , __A = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__A , __A ): lowerCAmelCase_ :Dict = self.preprocess_text(__A ) lowerCAmelCase_ :int = self.sp_model.encode(__A ) else: lowerCAmelCase_ :List[str] = [self.preprocess_text(__A ) for t in text] lowerCAmelCase_ :List[Any] = self.sp_model.encode(__A ) if return_tensors is True or return_tensors == "pt": lowerCAmelCase_ :Dict = torch.tensor(__A ) return token_ids def 
__lowerCAmelCase ( self , __A ) -> str: return self.sp_model.decode(__A ) def __lowerCAmelCase ( self , __A ) -> List[int]: lowerCAmelCase_ :str = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()] lowerCAmelCase_ :Optional[Any] = ( f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__A ) + f"""{self.bos_token}Bot:""" ) return self.encode(text=__A )
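# A minimal usage sketch, assuming one of the checkpoints listed in
# PRETRAINED_VOCAB_FILES_MAP above is available on the Hub:
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   token_ids = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]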
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : List[str] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase_ :str = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" ) return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. lowerCAmelCase_ :Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :Any = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' model.eval() lowerCAmelCase_ :Dict = 0 for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowercase__ ) - 1: lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Tuple = metric.compute() return eval_metric["accuracy"] def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any: '''simple docstring''' lowerCAmelCase_ :Optional[int] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :int = config["""lr"""] lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :Optional[int] = int(config["""seed"""] ) lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase_ :Optional[Any] = args.model_name_or_path set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ ) # Instantiate optimizer lowerCAmelCase_ :List[str] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowerCAmelCase_ :Any = 1 lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , ) else: lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase_ :List[str] = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase_ :List[Any] = 0 lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" ) lowerCAmelCase_ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: lowerCAmelCase_ :Dict = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1] lowerCAmelCase_ :int = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1 lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) accelerator.print("""resumed checkpoint performance:""" , lowercase__ ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f: lowerCAmelCase_ :List[str] = json.load(lowercase__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model lowerCAmelCase_ :List[Any] = {} for epoch in range(lowercase__ , lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Dict = outputs.loss lowerCAmelCase_ :int = loss / gradient_accumulation_steps accelerator.backward(lowercase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 lowerCAmelCase_ :List[str] = f"""epoch_{epoch}""" lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ ) accelerator.save_state(lowercase__ ) lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = accuracy lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0] lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""] lowerCAmelCase_ :List[Any] = epoch lowerCAmelCase_ :Tuple = overall_step accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) def _snake_case ( ) -> int: '''simple docstring''' lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , ) parser.add_argument( 
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , ) lowerCAmelCase_ :Optional[int] = parser.parse_args() lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
"""simple docstring""" import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Union[str, Any]: if isinstance(__A , __A ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden lowerCAmelCase_ :Tuple = deepcopy(__A ) elif os.path.exists(__A ): with io.open(__A , """r""" , encoding="""utf-8""" ) as f: lowerCAmelCase_ :str = json.load(__A ) else: try: lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" ) lowerCAmelCase_ :int = json.loads(__A ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) lowerCAmelCase_ :Optional[Any] = config self.set_stage_and_offload() def __lowerCAmelCase ( self ) -> Tuple: # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 ) # offload lowerCAmelCase_ :Dict = False if self.is_zeroa() or self.is_zeroa(): lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] ) lowerCAmelCase_ :Union[str, Any] = set( [ self.get_value("""zero_optimization.offload_optimizer.device""" ), self.get_value("""zero_optimization.offload_param.device""" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: lowerCAmelCase_ :Optional[int] = True def __lowerCAmelCase ( self , __A ) -> Optional[Any]: lowerCAmelCase_ :str = self.config # find the config node of interest if it exists lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" ) lowerCAmelCase_ :List[str] = nodes.pop() for node in nodes: lowerCAmelCase_ :Tuple = config.get(__A ) if config is None: return None, ds_key return config, ds_key def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A ) if config is None: return default return config.get(__A , __A ) def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]: lowerCAmelCase_ :Tuple = self.config # find the config node of interest if it exists lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" ) for node in nodes: lowerCAmelCase_ :int = config lowerCAmelCase_ :Any = config.get(__A ) if config is None: if must_exist: raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(__A ) def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = self.get_value(__A ) return False if value is None else bool(__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[str] = self.get_value(__A ) return False if value is None else not bool(__A ) def __lowerCAmelCase ( self ) -> str: return self._stage == 2 def __lowerCAmelCase ( self ) -> Union[str, Any]: return self._stage == 3 def __lowerCAmelCase ( self ) -> Union[str, Any]: return self._offload class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Optional[int]: lowerCAmelCase_ :Dict = engine def __lowerCAmelCase ( self 
, __A , **__A ) -> str: # runs backpropagation and handles mixed precision self.engine.backward(__A , **__A ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> List[str]: super().__init__(__A , device_placement=__A , scaler=__A ) lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" ) def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def __lowerCAmelCase ( self ) -> List[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def __lowerCAmelCase ( self ) -> int: if self.__has_overflow__: return self.optimizer.overflow return False class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A , __A ) -> Optional[int]: super().__init__(__A , __A ) def __lowerCAmelCase ( self ) -> Any: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]: lowerCAmelCase_ :str = params lowerCAmelCase_ :Any = lr lowerCAmelCase_ :List[Any] = weight_decay lowerCAmelCase_ :Any = kwargs class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]: lowerCAmelCase_ :Optional[int] = optimizer lowerCAmelCase_ :int = total_num_steps lowerCAmelCase_ :List[Any] = warmup_num_steps lowerCAmelCase_ :int = kwargs
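# A minimal sketch of querying the config wrapper; the stage-3 dict below is
# illustrative, not from the source:
#
#   ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3}})
#   assert ds_config.is_zero3() and not ds_config.is_offload()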
"""simple docstring""" import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process __UpperCAmelCase = logging.getLogger(__name__) __UpperCAmelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) __UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(A__ )} , ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) UpperCAmelCase_ :bool = field( default=A__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) UpperCAmelCase_ :str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) UpperCAmelCase_ :bool = field( default=A__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } , ) def __lowerCAmelCase ( self ) -> Tuple: if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" ) @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) UpperCAmelCase_ :Optional[str] = field(default=A__ , metadata={"help": "The input training data file (a text file)."} ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , ) UpperCAmelCase_ :bool = field( default=A__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) UpperCAmelCase_ :Optional[int] = field( default=5 , metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" } , ) UpperCAmelCase_ :Optional[int] = field( default=A__ , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) } , ) UpperCAmelCase_ :Optional[int] = field( default=A__ , metadata={"help": "The number of processes to use for the preprocessing."} , ) UpperCAmelCase_ :float = field( default=0.1_5 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) UpperCAmelCase_ :bool = field( default=A__ , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) def __lowerCAmelCase ( self ) -> Dict: if self.train_file is not None: lowerCAmelCase_ :List[Any] = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: lowerCAmelCase_ :List[Any] = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def _snake_case ( lowercase__ : Tuple , lowercase__ : Any ) -> str: '''simple docstring''' with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f: lowerCAmelCase_ :Optional[int] = [json.loads(lowercase__ ) for line in f.read().splitlines() if (len(lowercase__ ) > 0 and not line.isspace())] assert len(lowercase__ ) == len(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = {c: dataset[c] for c in dataset.column_names} lowerCAmelCase_ :Any = refs return Dataset.from_dict(lowercase__ ) def _snake_case ( ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :int = parser.parse_args_into_dataclasses() # Detecting last checkpoint. lowerCAmelCase_ :List[str] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCAmelCase_ :Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowercase__ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
lowerCAmelCase_ :Tuple = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): lowerCAmelCase_ :Optional[int] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , ) lowerCAmelCase_ :str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , ) else: lowerCAmelCase_ :Dict = {} if data_args.train_file is not None: lowerCAmelCase_ :str = data_args.train_file if data_args.validation_file is not None: lowerCAmelCase_ :Union[str, Any] = data_args.validation_file lowerCAmelCase_ :Optional[Any] = data_args.train_file.split(""".""" )[-1] if extension == "txt": lowerCAmelCase_ :Any = """text""" lowerCAmelCase_ :str = load_dataset(lowercase__ , data_files=lowercase__ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCAmelCase_ :List[Any] = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: lowerCAmelCase_ :Any = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: lowerCAmelCase_ :Optional[int] = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) lowerCAmelCase_ :List[Any] = { """cache_dir""": model_args.cache_dir, """use_fast""": model_args.use_fast_tokenizer, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: lowerCAmelCase_ :int = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase__ ) elif model_args.model_name_or_path: lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) if model_args.model_name_or_path: lowerCAmelCase_ :List[Any] = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) lowerCAmelCase_ :Dict = AutoModelForMaskedLM.from_config(lowercase__ ) model.resize_token_embeddings(len(lowercase__ ) ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: lowerCAmelCase_ :Optional[int] = datasets["""train"""].column_names else: lowerCAmelCase_ :List[str] = datasets["""validation"""].column_names lowerCAmelCase_ :int = """text""" if """text""" in column_names else column_names[0] lowerCAmelCase_ :Union[str, Any] = """max_length""" if data_args.pad_to_max_length else False def tokenize_function(lowercase__ : Optional[Any] ): # Remove empty lines lowerCAmelCase_ :List[Any] = [line for line in examples["""text"""] if len(lowercase__ ) > 0 and not line.isspace()] return tokenizer(examples["""text"""] , padding=lowercase__ , truncation=lowercase__ , max_length=data_args.max_seq_length ) lowerCAmelCase_ :Tuple = datasets.map( lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: lowerCAmelCase_ :List[str] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: lowerCAmelCase_ :str = add_chinese_references( tokenized_datasets["""validation"""] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer lowerCAmelCase_ :List[str] = data_args.train_ref_file or data_args.validation_ref_file if has_ref: lowerCAmelCase_ :Optional[Any] = False # Data collator # This one will take care of randomly masking the tokens. lowerCAmelCase_ :List[str] = DataCollatorForWholeWordMask(tokenizer=lowercase__ , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer lowerCAmelCase_ :str = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: if last_checkpoint is not None: lowerCAmelCase_ :str = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): lowerCAmelCase_ :List[Any] = model_args.model_name_or_path else: lowerCAmelCase_ :Dict = None lowerCAmelCase_ :int = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() # Saves the tokenizer too for easy upload lowerCAmelCase_ :List[Any] = os.path.join(training_args.output_dir , """train_results.txt""" ) if trainer.is_world_process_zero(): with open(lowercase__ , """w""" ) as writer: logger.info("""***** Train results *****""" ) for key, value in sorted(train_result.metrics.items() ): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # Evaluation lowerCAmelCase_ :Union[str, Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowerCAmelCase_ :Optional[Any] = trainer.evaluate() lowerCAmelCase_ :int = math.exp(eval_output["""eval_loss"""] ) lowerCAmelCase_ :str = perplexity lowerCAmelCase_ :Any = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" ) if trainer.is_world_process_zero(): with open(lowercase__ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in sorted(results.items() ): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) return 
results def _snake_case ( lowercase__ : List[Any] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
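# A hedged launch sketch for this whole-word-masking MLM script; the script and
# data file names below are assumptions, not taken from the source:
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt --train_ref_file train_ref.txt \
#       --do_train --do_eval --output_dir ./mlm_wwm_out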
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined" UpperCAmelCase_ :List[Any] = "image_segmenter" UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation UpperCAmelCase_ :Tuple = ["image", "text"] UpperCAmelCase_ :Dict = ["image"] def __init__( self , *__A , **__A ) -> Optional[Any]: requires_backends(self , ["""vision"""] ) super().__init__(*__A , **__A ) def __lowerCAmelCase ( self , __A , __A ) -> Any: return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" ) def __lowerCAmelCase ( self , __A ) -> Tuple: with torch.no_grad(): lowerCAmelCase_ :Dict = self.model(**__A ).logits return logits def __lowerCAmelCase ( self , __A ) -> Tuple: lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy() lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :str = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Union[str, Any] = (UniPCMultistepScheduler,) UpperCAmelCase_ :Optional[int] = (("num_inference_steps", 25),) def __lowerCAmelCase ( self , **__A ) -> List[str]: lowerCAmelCase_ :int = { """num_train_timesteps""": 1000, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """solver_order""": 2, """solver_type""": """bh2""", } config.update(**__A ) return config def __lowerCAmelCase ( self , __A=0 , **__A ) -> List[Any]: lowerCAmelCase_ :str = dict(self.forward_default_kwargs ) lowerCAmelCase_ :Any = kwargs.pop("""num_inference_steps""" , __A ) lowerCAmelCase_ :List[str] = self.dummy_sample lowerCAmelCase_ :int = 0.1 * sample lowerCAmelCase_ :Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase_ :int = self.get_scheduler_config(**__A ) lowerCAmelCase_ :List[str] = scheduler_class(**__A ) scheduler.set_timesteps(__A ) # copy over dummy past residuals lowerCAmelCase_ :List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__A ) lowerCAmelCase_ :Optional[int] = scheduler_class.from_pretrained(__A ) new_scheduler.set_timesteps(__A ) # copy over dummy past residuals lowerCAmelCase_ :int = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = sample, sample for t in range(__A , time_step + scheduler.config.solver_order + 1 ): lowerCAmelCase_ :List[Any] = scheduler.step(__A , __A , __A , **__A ).prev_sample lowerCAmelCase_ :str = new_scheduler.step(__A , __A , __A , **__A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , __A=0 , **__A ) -> Union[str, Any]: lowerCAmelCase_ :Optional[Any] = dict(self.forward_default_kwargs ) lowerCAmelCase_ :Optional[Any] = kwargs.pop("""num_inference_steps""" , __A ) lowerCAmelCase_ :str = self.dummy_sample lowerCAmelCase_ :Union[str, Any] = 0.1 * sample lowerCAmelCase_ :Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: lowerCAmelCase_ :Tuple = self.get_scheduler_config() lowerCAmelCase_ :Any = scheduler_class(**__A ) scheduler.set_timesteps(__A ) # copy over dummy past residuals (must be after setting timesteps) lowerCAmelCase_ :Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__A ) lowerCAmelCase_ :str = scheduler_class.from_pretrained(__A ) # copy over dummy past residuals new_scheduler.set_timesteps(__A ) # copy over dummy past residual (must be after setting timesteps) lowerCAmelCase_ :Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCAmelCase_ :Union[str, Any] = scheduler.step(__A , __A , __A , **__A ).prev_sample lowerCAmelCase_ :Optional[Any] = new_scheduler.step(__A , __A , __A , **__A ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , __A=None , **__A ) -> List[Any]: if scheduler is None: lowerCAmelCase_ :List[str] = self.scheduler_classes[0] lowerCAmelCase_ :str = 
self.get_scheduler_config(**__A ) lowerCAmelCase_ :Optional[Any] = scheduler_class(**__A ) lowerCAmelCase_ :Optional[int] = self.scheduler_classes[0] lowerCAmelCase_ :List[Any] = self.get_scheduler_config(**__A ) lowerCAmelCase_ :List[Any] = scheduler_class(**__A ) lowerCAmelCase_ :Any = 10 lowerCAmelCase_ :List[str] = self.dummy_model() lowerCAmelCase_ :Optional[Any] = self.dummy_sample_deter scheduler.set_timesteps(__A ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase_ :List[Any] = model(__A , __A ) lowerCAmelCase_ :Any = scheduler.step(__A , __A , __A ).prev_sample return sample def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Union[str, Any] = dict(self.forward_default_kwargs ) lowerCAmelCase_ :List[str] = kwargs.pop("""num_inference_steps""" , __A ) for scheduler_class in self.scheduler_classes: lowerCAmelCase_ :Any = self.get_scheduler_config() lowerCAmelCase_ :str = scheduler_class(**__A ) lowerCAmelCase_ :Any = self.dummy_sample lowerCAmelCase_ :Union[str, Any] = 0.1 * sample if num_inference_steps is not None and hasattr(__A , """set_timesteps""" ): scheduler.set_timesteps(__A ) elif num_inference_steps is not None and not hasattr(__A , """set_timesteps""" ): lowerCAmelCase_ :List[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCAmelCase_ :List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] lowerCAmelCase_ :List[str] = dummy_past_residuals[: scheduler.config.solver_order] lowerCAmelCase_ :Any = scheduler.timesteps[5] lowerCAmelCase_ :List[str] = scheduler.timesteps[6] lowerCAmelCase_ :Any = scheduler.step(__A , __A , __A , **__A ).prev_sample lowerCAmelCase_ :int = scheduler.step(__A , __A , __A , **__A ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __lowerCAmelCase ( self ) -> str: # make sure that iterating over schedulers with same config names gives same results # for defaults lowerCAmelCase_ :List[str] = UniPCMultistepScheduler(**self.get_scheduler_config() ) lowerCAmelCase_ :Union[str, Any] = self.full_loop(scheduler=__A ) lowerCAmelCase_ :int = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 lowerCAmelCase_ :Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowerCAmelCase_ :List[Any] = DEISMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase_ :Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase_ :List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) lowerCAmelCase_ :Union[str, Any] = self.full_loop(scheduler=__A ) lowerCAmelCase_ :List[Any] = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def __lowerCAmelCase ( self ) -> List[Any]: for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__A ) def __lowerCAmelCase ( self ) -> Dict: self.check_over_configs(thresholding=__A ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__A , prediction_type=__A , sample_max_value=__A , solver_order=__A , solver_type=__A , ) def __lowerCAmelCase ( self ) -> int: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__A ) def __lowerCAmelCase ( self ) -> Tuple: for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( 
solver_order=__A , solver_type=__A , prediction_type=__A , ) lowerCAmelCase_ :Dict = self.full_loop( solver_order=__A , solver_type=__A , prediction_type=__A , ) assert not torch.isnan(__A ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> Optional[int]: self.check_over_configs(lower_order_final=__A ) self.check_over_configs(lower_order_final=__A ) def __lowerCAmelCase ( self ) -> str: for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__A , time_step=0 ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Tuple = self.full_loop() lowerCAmelCase_ :Any = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :str = self.full_loop(prediction_type="""v_prediction""" ) lowerCAmelCase_ :str = torch.mean(torch.abs(__A ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3 def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :str = self.scheduler_classes[0] lowerCAmelCase_ :List[str] = self.get_scheduler_config(thresholding=__A , dynamic_thresholding_ratio=0 ) lowerCAmelCase_ :int = scheduler_class(**__A ) lowerCAmelCase_ :List[Any] = 10 lowerCAmelCase_ :Tuple = self.dummy_model() lowerCAmelCase_ :Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(__A ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase_ :List[Any] = model(__A , __A ) lowerCAmelCase_ :Tuple = scheduler.step(__A , __A , __A ).prev_sample assert sample.dtype == torch.floataa def __lowerCAmelCase ( self , **__A ) -> List[str]: for scheduler_class in self.scheduler_classes: lowerCAmelCase_ :Union[str, Any] = self.get_scheduler_config(**__A ) lowerCAmelCase_ :int = scheduler_class(**__A ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
"""simple docstring""" def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int: '''simple docstring''' if index == number_of_items: return 0 lowerCAmelCase_ :Any = 0 lowerCAmelCase_ :str = 0 lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 ) if weights[index] <= max_weight: lowerCAmelCase_ :str = values[index] + knapsack( lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 ) return max(lowercase__ , lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from ...processing_utils import ProcessorMixin class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Any = ["image_processor", "feature_extractor"] UpperCAmelCase_ :Any = "TvltImageProcessor" UpperCAmelCase_ :List[Any] = "TvltFeatureExtractor" def __init__( self , __A , __A ) -> Optional[Any]: super().__init__(image_processor=__A , feature_extractor=__A ) lowerCAmelCase_ :Dict = image_processor lowerCAmelCase_ :Union[str, Any] = feature_extractor def __call__( self , __A=None , __A=None , __A=None , __A=None , __A=False , __A=False , *__A , **__A , ) -> Optional[int]: if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) lowerCAmelCase_ :str = None if images is not None: lowerCAmelCase_ :Tuple = self.image_processor(__A , mask_pixel=__A , *__A , **__A ) if images_mixed is not None: lowerCAmelCase_ :Tuple = self.image_processor(__A , is_mixed=__A , *__A , **__A ) if audio is not None: lowerCAmelCase_ :Optional[Any] = self.feature_extractor( __A , *__A , sampling_rate=__A , mask_audio=__A , **__A ) lowerCAmelCase_ :Optional[Any] = {} if audio is not None: output_dict.update(__A ) if images is not None: output_dict.update(__A ) if images_mixed_dict is not None: output_dict.update(__A ) return output_dict @property def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = self.image_processor.model_input_names lowerCAmelCase_ :Dict = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
"""simple docstring""" from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]: '''simple docstring''' if not is_tqdm_available(): raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" ) lowerCAmelCase_ :Tuple = False if main_process_only: lowerCAmelCase_ :Dict = PartialState().local_process_index == 0 return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :List[str] = BioGptTokenizer UpperCAmelCase_ :int = False def __lowerCAmelCase ( self ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ :str = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] lowerCAmelCase_ :Any = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :str = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(__A ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(__A ) ) def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: lowerCAmelCase_ :int = """lower newer""" lowerCAmelCase_ :str = """lower newer""" return input_text, output_text def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Tuple = BioGptTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase_ :Dict = """lower""" lowerCAmelCase_ :List[str] = ["""low""", """er</w>"""] lowerCAmelCase_ :Any = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :int = tokens + ["""<unk>"""] lowerCAmelCase_ :Any = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) @slow def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase_ :Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :Any = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :Dict = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :Tuple = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json') class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = 0 def __lowerCAmelCase ( self ) -> List[str]: self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" ) os.makedirs(__A , exist_ok=__A ) with open(os.path.join(__A , """config.json""" ) , """w""" ) as f: f.write(json.dumps({} ) ) lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertEqual(type(__A ) , __A ) def __lowerCAmelCase ( self ) -> Optional[int]: try: AutoConfig.register("""custom""" , __A ) # Wrong model type will raise an error with self.assertRaises(__A ): AutoConfig.register("""model""" , __A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoConfig.register("""bert""" , __A ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCAmelCase_ :Union[str, Any] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def __lowerCAmelCase ( self ) -> Tuple: with self.assertRaisesRegex( __A , """bert-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" ) def __lowerCAmelCase ( self ) -> Any: with self.assertRaisesRegex( __A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" ) def __lowerCAmelCase ( self ) -> int: with self.assertRaisesRegex( __A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ): lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def __lowerCAmelCase ( self ) -> 
Tuple: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__A ): lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__A ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A ) self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" ) def __lowerCAmelCase ( self ) -> int: class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :int = "new-model" try: AutoConfig.register("""new-model""" , __A ) # If remote code is not set, the default is to use local lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
"""simple docstring""" def _snake_case ( lowercase__ : list , lowercase__ : int , lowercase__ : int = 0 , lowercase__ : int = 0 ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = right or len(lowercase__ ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(lowercase__ , lowercase__ , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :str = GPTSanJapaneseTokenizer UpperCAmelCase_ :Optional[int] = False UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False} def __lowerCAmelCase ( self ) -> Tuple: super().setUp() # fmt: off lowerCAmelCase_ :Dict = ["""ใ“ใ‚“""", """ใ“ใ‚“ใซ""", """ใซใกใฏ""", """ใฐใ‚“ใฏ""", """ไธ–็•Œ,ใ”บ็•Œ""", """ใ€""", """ใ€‚""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # ๐Ÿ˜€ lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""} lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(__A ) ) def __lowerCAmelCase ( self , **__A ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> Dict: lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" return input_text, output_text def __lowerCAmelCase ( self , __A ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A ) lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A ) lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A ) return text, ids def __lowerCAmelCase ( self ) -> str: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> int: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚""" lowerCAmelCase_ :Any = ["""ใ“ใ‚“""", """ใซใกใฏ""", """ใ€""", """ไธ–็•Œ""", """ใ€‚""", """<SP>""", """ใ“ใ‚“""", """ใฐใ‚“ใฏ""", """ใ€""", """ใ”บ็•Œ""", """ใ€‚"""] lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids without special tokens lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids with special tokens lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token] lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( 
self ) -> Union[str, Any]: lowerCAmelCase_ :int = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚""" lowerCAmelCase_ :str = """ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :str = tokenizer.encode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Any = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text ) lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A ) lowerCAmelCase_ :int = tokenizer.decode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) lowerCAmelCase_ :Tuple = tokenizer.decode(__A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1) lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0] lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1) lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ„ใƒฏ""" , prefix_text="""ใ‚ใƒณ""" ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertNotEqual(__A , __A ) self.assertNotEqual(__A , __A ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = [["""ๆญฆ็”ฐไฟก็Ž„""", """ใฏใ€"""], ["""็น”็”ฐไฟก้•ท""", """ใฎ้…ไธ‹ใฎใ€"""]] lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A ) lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A ) # fmt: off lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]] lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], 
[1, 1, 1, 0, 0, 0, 0, 0]] lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , __A ) self.assertListEqual(x_token.token_type_ids , __A ) self.assertListEqual(x_token.attention_mask , __A ) self.assertListEqual(x_token_a.input_ids , __A ) self.assertListEqual(x_token_a.token_type_ids , __A ) self.assertListEqual(x_token_a.attention_mask , __A ) def __lowerCAmelCase ( self ) -> Tuple: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def __lowerCAmelCase ( self ) -> str: # tokenizer has no padding token pass
"""simple docstring""" from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :Tuple = LEDConfig UpperCAmelCase_ :int = {} UpperCAmelCase_ :Union[str, Any] = "gelu" def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=20 , __A=2 , __A=1 , __A=0 , __A=4 , ) -> Union[str, Any]: lowerCAmelCase_ :Dict = parent lowerCAmelCase_ :str = batch_size lowerCAmelCase_ :Optional[Any] = seq_length lowerCAmelCase_ :str = is_training lowerCAmelCase_ :Tuple = use_labels lowerCAmelCase_ :Tuple = vocab_size lowerCAmelCase_ :Union[str, Any] = hidden_size lowerCAmelCase_ :Optional[Any] = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :List[Any] = intermediate_size lowerCAmelCase_ :List[Any] = hidden_dropout_prob lowerCAmelCase_ :Optional[int] = attention_probs_dropout_prob lowerCAmelCase_ :Optional[int] = max_position_embeddings lowerCAmelCase_ :List[str] = eos_token_id lowerCAmelCase_ :Optional[Any] = pad_token_id lowerCAmelCase_ :int = bos_token_id lowerCAmelCase_ :List[Any] = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after lowerCAmelCase_ :Tuple = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests lowerCAmelCase_ :Optional[int] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCAmelCase_ :Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase_ :Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ :Optional[int] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) lowerCAmelCase_ :Union[str, Any] = prepare_led_inputs_dict(__A , __A , __A ) lowerCAmelCase_ :Tuple = tf.concat( [tf.zeros_like(__A )[:, :-1], tf.ones_like(__A )[:, -1:]] , axis=-1 , ) lowerCAmelCase_ :str = global_attention_mask return config, inputs_dict def __lowerCAmelCase ( self , __A , __A ) -> str: lowerCAmelCase_ :List[str] = TFLEDModel(config=__A ).get_decoder() lowerCAmelCase_ :Optional[Any] = inputs_dict["""input_ids"""] lowerCAmelCase_ :List[str] = input_ids[:1, :] lowerCAmelCase_ :Optional[Any] = inputs_dict["""attention_mask"""][:1, :] lowerCAmelCase_ :Any = 1 # first forward pass lowerCAmelCase_ :List[str] = model(__A , attention_mask=__A , use_cache=__A ) lowerCAmelCase_ , lowerCAmelCase_ :Any = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase_ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase_ :Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCAmelCase_ :Dict = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCAmelCase_ :Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCAmelCase_ :Dict = model(__A , attention_mask=__A )[0] lowerCAmelCase_ :int = model(__A , attention_mask=__A , past_key_values=__A )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCAmelCase_ :str = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCAmelCase_ :Tuple = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase_ :Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__A , __A , rtol=1E-3 ) def _snake_case ( lowercase__ : int , lowercase__ : List[Any] , lowercase__ : Tuple , lowercase__ : Any=None , lowercase__ : int=None , lowercase__ : str=None , lowercase__ : Optional[int]=None , ) -> Optional[Any]: '''simple docstring''' if attention_mask is None: lowerCAmelCase_ :Union[str, Any] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCAmelCase_ :int = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , 
dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCAmelCase_ :Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase_ :Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :Union[str, Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () UpperCAmelCase_ :Tuple = (TFLEDForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase_ :Optional[int] = ( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase_ :int = True UpperCAmelCase_ :Optional[Any] = False UpperCAmelCase_ :Union[str, Any] = False UpperCAmelCase_ :Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = TFLEDModelTester(self ) lowerCAmelCase_ :List[str] = ConfigTester(self , config_class=__A ) def __lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__A ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ :Optional[int] = tf.zeros_like(inputs_dict["""attention_mask"""] ) lowerCAmelCase_ :Optional[int] = 2 lowerCAmelCase_ :Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , ) lowerCAmelCase_ :List[str] = True lowerCAmelCase_ :Optional[Any] = self.model_tester.seq_length lowerCAmelCase_ :Optional[Any] = self.model_tester.encoder_seq_length def check_decoder_attentions_output(__A ): lowerCAmelCase_ :str = outputs.decoder_attentions self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(__A ): lowerCAmelCase_ :Any = [t.numpy() for t in outputs.encoder_attentions] lowerCAmelCase_ :str = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: lowerCAmelCase_ :Dict = True lowerCAmelCase_ :List[str] = False lowerCAmelCase_ :int = False lowerCAmelCase_ :Any = model_class(__A ) lowerCAmelCase_ :Any = model(self._prepare_for_class(__A , __A ) ) lowerCAmelCase_ :Union[str, Any] = len(__A ) self.assertEqual(config.output_hidden_states , __A ) 
check_encoder_attentions_output(__A ) if self.is_encoder_decoder: lowerCAmelCase_ :Any = model_class(__A ) lowerCAmelCase_ :str = model(self._prepare_for_class(__A , __A ) ) self.assertEqual(config.output_hidden_states , __A ) check_decoder_attentions_output(__A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCAmelCase_ :Any = True lowerCAmelCase_ :Dict = model_class(__A ) lowerCAmelCase_ :int = model(self._prepare_for_class(__A , __A ) ) self.assertEqual(config.output_hidden_states , __A ) check_encoder_attentions_output(__A ) # Check attention is always last and order is fine lowerCAmelCase_ :Dict = True lowerCAmelCase_ :Optional[int] = True lowerCAmelCase_ :str = model_class(__A ) lowerCAmelCase_ :List[Any] = model(self._prepare_for_class(__A , __A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__A ) ) self.assertEqual(model.config.output_hidden_states , __A ) check_encoder_attentions_output(__A ) @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" ) def __lowerCAmelCase ( self ) -> str: pass def __lowerCAmelCase ( self ) -> List[str]: # TODO: Head-masking not yet implement pass def _snake_case ( lowercase__ : List[Any] ) -> int: '''simple docstring''' return tf.constant(lowercase__ , dtype=tf.intaa ) __UpperCAmelCase = 1e-4 @slow @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led # change to intended input here lowerCAmelCase_ :Union[str, Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) lowerCAmelCase_ :List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) lowerCAmelCase_ :List[Any] = prepare_led_inputs_dict(model.config , __A , __A ) lowerCAmelCase_ :Optional[Any] = model(**__A )[0] lowerCAmelCase_ :Tuple = (1, 1024, 768) self.assertEqual(output.shape , __A ) # change to expected output here lowerCAmelCase_ :Optional[Any] = tf.convert_to_tensor( [[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , ) tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1E-3 ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :List[str] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ) # change to intended input here lowerCAmelCase_ :Any = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) lowerCAmelCase_ :List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) lowerCAmelCase_ :Tuple = prepare_led_inputs_dict(model.config , __A , __A ) lowerCAmelCase_ :Any = model(**__A )[0] lowerCAmelCase_ :Optional[Any] = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , __A ) # change to expected output here lowerCAmelCase_ :Optional[Any] = tf.convert_to_tensor( [[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , ) tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1E-3 , rtol=1E-3 )
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __UpperCAmelCase = pd.read_csv( 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/' 'position_salaries.csv' ) __UpperCAmelCase = dataset.iloc[:, 1:2].values __UpperCAmelCase = dataset.iloc[:, 2].values __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0) __UpperCAmelCase = PolynomialFeatures(degree=4) __UpperCAmelCase = poly_reg.fit_transform(X) __UpperCAmelCase = LinearRegression() pol_reg.fit(X_poly, y) def _snake_case ( ) -> str: '''simple docstring''' plt.scatter(lowercase__ , lowercase__ , color="""red""" ) plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : list , lowercase__ : list , lowercase__ : list ) -> float: '''simple docstring''' lowerCAmelCase_ :Tuple = np.array([[1, item, train_mtch[i]] for i, item in enumerate(lowercase__ )] ) lowerCAmelCase_ :Any = np.array(lowercase__ ) lowerCAmelCase_ :Optional[Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , lowercase__ ) ) , x.transpose() ) , lowercase__ ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : list ) -> float: '''simple docstring''' lowerCAmelCase_ :Any = (1, 2, 1) lowerCAmelCase_ :int = (1, 1, 0, 7) lowerCAmelCase_ :Dict = SARIMAX( lowercase__ , exog=lowercase__ , order=lowercase__ , seasonal_order=lowercase__ ) lowerCAmelCase_ :Any = model.fit(disp=lowercase__ , maxiter=6_0_0 , method="""nm""" ) lowerCAmelCase_ :str = model_fit.predict(1 , len(lowercase__ ) , exog=[test_match] ) return result[0] def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : list ) -> float: '''simple docstring''' lowerCAmelCase_ :Optional[int] = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(lowercase__ , lowercase__ ) lowerCAmelCase_ :int = regressor.predict(lowercase__ ) return y_pred[0] def _snake_case ( lowercase__ : list ) -> float: '''simple docstring''' train_user.sort() lowerCAmelCase_ :Dict = np.percentile(lowercase__ , 2_5 ) lowerCAmelCase_ :List[Any] = np.percentile(lowercase__ , 7_5 ) lowerCAmelCase_ :List[Any] = qa - qa lowerCAmelCase_ :Optional[Any] = qa - (iqr * 0.1) return low_lim def _snake_case ( lowercase__ : list , lowercase__ : float ) -> bool: '''simple docstring''' lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :Any = 0 for i in list_vote: if i > actual_result: lowerCAmelCase_ :Tuple = not_safe + 1 else: if abs(abs(lowercase__ ) - abs(lowercase__ ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) __UpperCAmelCase = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]] __UpperCAmelCase = pd.DataFrame( data_input, columns=['total_user', 'total_even', 'days'] ) __UpperCAmelCase = Normalizer().fit_transform(data_input_df.values) # split data __UpperCAmelCase = normalize_df[:, 2].tolist() __UpperCAmelCase = normalize_df[:, 0].tolist() __UpperCAmelCase = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) __UpperCAmelCase = normalize_df[:, [1, 2]].tolist() __UpperCAmelCase = x[: len(x) - 1] __UpperCAmelCase = x[len(x) - 1 :] # for linear regression & sarimax __UpperCAmelCase = total_date[: len(total_date) - 1] __UpperCAmelCase = total_user[: len(total_user) - 1] __UpperCAmelCase = total_match[: len(total_match) - 1] __UpperCAmelCase = total_date[len(total_date) - 1 :] __UpperCAmelCase = total_user[len(total_user) - 1 :] __UpperCAmelCase = total_match[len(total_match) - 1 :] # voting system with forecasting __UpperCAmelCase = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data __UpperCAmelCase = '' if data_safety_checker(res_vote, tst_user) else 'not ' 
print(F"""Today's data is {not_str}safe.""")
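# The closed-form ordinary-least-squares step used by the first function in
# this script (linear_regression_prediction), shown in isolation:
# beta = (X^T X)^(-1) X^T y, then predict with the feature row [1, date, match].
# Note that the script's own return line adds `test_mtch[0]` and `beta[2]` as
# two separate terms rather than multiplying them, which appears to diverge
# from this formula; the toy check below uses the standard dot product, and
# all numbers are illustrative.
import numpy as np

X = np.array([[1.0, 1.0, 2.0], [1.0, 2.0, 1.0], [1.0, 3.0, 3.0]])  # rows: [1, date, match]
y = np.array([5.0, 6.0, 10.0])  # generated as 1 + 2*date + 1*match

beta = np.linalg.inv(X.T @ X) @ X.T @ y  # -> approximately [1., 2., 1.]
pred = beta @ np.array([1.0, 4.0, 2.0])  # 1 + 2*4 + 1*2 = 11.0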
"""simple docstring""" from __future__ import annotations __UpperCAmelCase = 1.6021e-19 # units = C def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]: '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif conductivity < 0: raise ValueError("""Conductivity cannot be negative""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative""" ) elif mobility < 0: raise ValueError("""mobility cannot be negative""" ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Union[tf.Tensor, np.ndarray] ) -> List[int]: '''simple docstring''' if isinstance(lowercase__ , np.ndarray ): return list(tensor.shape ) lowerCAmelCase_ :int = tf.shape(lowercase__ ) if tensor.shape == tf.TensorShape(lowercase__ ): return dynamic lowerCAmelCase_ :str = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(lowercase__ )] def _snake_case ( lowercase__ : tf.Tensor , lowercase__ : Optional[int] = None , lowercase__ : Optional[str] = None ) -> tf.Tensor: '''simple docstring''' return tf.nn.softmax(logits=logits + 1E-9 , axis=lowercase__ , name=lowercase__ ) def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : str=1E-5 , lowercase__ : Dict=-1 ) -> List[str]: '''simple docstring''' if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowercase__ , lowercase__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tf.nn.moments(lowercase__ , axes=[axis] , keepdims=lowercase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowerCAmelCase_ :str = [1] * inputs.shape.rank lowerCAmelCase_ :Union[str, Any] = shape_list(lowercase__ )[axis] lowerCAmelCase_ :str = tf.reshape(lowercase__ , lowercase__ ) lowerCAmelCase_ :Optional[int] = tf.reshape(lowercase__ , lowercase__ ) # Compute layer normalization using the batch_normalization # function. lowerCAmelCase_ :str = tf.nn.batch_normalization( lowercase__ , lowercase__ , lowercase__ , offset=lowercase__ , scale=lowercase__ , variance_epsilon=lowercase__ , ) return outputs def _snake_case ( lowercase__ : List[str] , lowercase__ : str=0 , lowercase__ : Tuple=-1 ) -> List[str]: '''simple docstring''' if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowerCAmelCase_ :List[str] = tf.shape(lowercase__ ) lowerCAmelCase_ :Dict = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowerCAmelCase_ :Union[str, Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(lowercase__ , lowercase__ ) def _snake_case ( lowercase__ : tf.Tensor ) -> tf.Tensor: '''simple docstring''' if not isinstance(lowercase__ , tf.Tensor ): lowerCAmelCase_ :Optional[int] = tf.convert_to_tensor(lowercase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowerCAmelCase_ :List[Any] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowerCAmelCase_ :Union[str, Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowerCAmelCase_ :Tuple = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def _snake_case ( lowercase__ : tf.Tensor , lowercase__ : int , lowercase__ : str = "input_ids" ) -> None: '''simple docstring''' tf.debugging.assert_less( lowercase__ , tf.cast(lowercase__ , dtype=tensor.dtype ) , message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(lowercase__ )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ) , ) def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> str: '''simple docstring''' lowerCAmelCase_ :int = 6_4_5_1_2 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowerCAmelCase_ :Dict = [x for x in data if len(lowercase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) lowerCAmelCase_ :Optional[int] = np.asarray(lowercase__ ) lowerCAmelCase_ :List[Any] = 1 lowerCAmelCase_ :Union[str, Any] = np.array_split(lowercase__ , lowercase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowerCAmelCase_ :int = np.array_split(lowercase__ , lowercase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(lowercase__ ): lowerCAmelCase_ :Optional[Any] = chunk_data else: lowerCAmelCase_ :Tuple = data def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] ) -> Tuple: '''simple docstring''' if name in group.attrs: lowerCAmelCase_ :List[str] = [n.decode("""utf8""" ) if hasattr(lowercase__ , """decode""" ) else n for n in group.attrs[name]] else: lowerCAmelCase_ :Dict = [] lowerCAmelCase_ :Dict = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(lowercase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def _snake_case ( lowercase__ : int ) -> Optional[Any]: '''simple docstring''' def _expand_single_ad_tensor(lowercase__ : Union[str, Any] ): if isinstance(lowercase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(lowercase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , lowercase__ )
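# A quick, self-contained sketch of the shape helper defined at the top of
# this module (its original name appears to be `shape_list`; the parameter
# name below is an assumption). Static dimensions come back as plain Python
# ints; any dimension unknown at graph-construction time comes back as a
# dynamic scalar tensor instead.
import tensorflow as tf


def shape_list(tensor):
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):  # unknown rank: all dynamic
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


shape_list(tf.ones((2, 3, 4)))  # -> [2, 3, 4]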
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *__A , **__A ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , __A , ) super().__init__(*__A , **__A )
"""simple docstring""" import warnings from functools import wraps from typing import Callable def _snake_case ( lowercase__ : Callable ) -> Callable: '''simple docstring''' @wraps(lowercase__ ) def _inner_fn(*lowercase__ : Optional[int] , **lowercase__ : Optional[Any] ): warnings.warn( (f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase__ , ) return fn(*lowercase__ , **lowercase__ ) return _inner_fn
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame: '''simple docstring''' lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}""" lowerCAmelCase_ :List[str] = { """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", """Accept-Language""": """en-US, en;q=0.5""", } lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text ) # Initialize a Pandas dataframe with the column titles lowerCAmelCase_ :Union[str, Any] = DataFrame( columns=[ """Product Title""", """Product Link""", """Current Price of the product""", """Product Rating""", """MRP of the product""", """Discount""", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( """div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ): try: lowerCAmelCase_ :str = item.ha.text lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.ha.a["""href"""] lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text try: lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text except AttributeError: lowerCAmelCase_ :int = """Not available""" try: lowerCAmelCase_ :str = ( """โ‚น""" + item.find( """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""โ‚น""" )[1] ) except AttributeError: lowerCAmelCase_ :Optional[Any] = """""" try: lowerCAmelCase_ :str = float( ( ( float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) - float(product_price.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) / float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) * 1_0_0 ) except ValueError: lowerCAmelCase_ :Union[str, Any] = float("""nan""" ) except AttributeError: pass lowerCAmelCase_ :Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] lowerCAmelCase_ :List[Any] = """ """ lowerCAmelCase_ :Tuple = """ """ data_frame.index += 1 return data_frame if __name__ == "__main__": __UpperCAmelCase = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
"""simple docstring""" import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self , __A , __A=13 , __A=30 , __A=2 , __A=3 , __A=True , __A=True , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=10 , __A=0.0_2 , ) -> Optional[Any]: lowerCAmelCase_ :List[str] = parent lowerCAmelCase_ :int = batch_size lowerCAmelCase_ :Optional[Any] = image_size lowerCAmelCase_ :Tuple = patch_size lowerCAmelCase_ :int = num_channels lowerCAmelCase_ :List[Any] = is_training lowerCAmelCase_ :List[Any] = use_labels lowerCAmelCase_ :Optional[int] = hidden_size lowerCAmelCase_ :Tuple = num_hidden_layers lowerCAmelCase_ :Union[str, Any] = num_attention_heads lowerCAmelCase_ :Union[str, Any] = intermediate_size lowerCAmelCase_ :List[str] = hidden_act lowerCAmelCase_ :Optional[int] = hidden_dropout_prob lowerCAmelCase_ :List[str] = attention_probs_dropout_prob lowerCAmelCase_ :int = type_sequence_label_size lowerCAmelCase_ :List[Any] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase_ :Tuple = (image_size // patch_size) ** 2 lowerCAmelCase_ :int = num_patches + 1 def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ :str = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , ) return config, pixel_values def __lowerCAmelCase ( self , __A , __A ) -> Any: lowerCAmelCase_ :str = FlaxViTModel(config=__A ) lowerCAmelCase_ :str = model(__A ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase_ :Optional[Any] = (self.image_size, self.image_size) lowerCAmelCase_ :Optional[Any] = (self.patch_size, self.patch_size) lowerCAmelCase_ :Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def __lowerCAmelCase ( self , __A , __A ) -> List[str]: lowerCAmelCase_ :Optional[int] = self.type_sequence_label_size lowerCAmelCase_ :List[Any] = FlaxViTForImageClassification(config=__A ) lowerCAmelCase_ :Union[str, Any] = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase_ :List[Any] = 1 lowerCAmelCase_ :Dict = FlaxViTForImageClassification(__A ) lowerCAmelCase_ :str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase_ :List[str] = model(__A ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Optional[int] = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) :Any 
= config_and_inputs lowerCAmelCase_ :Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def __lowerCAmelCase ( self ) -> None: lowerCAmelCase_ :Union[str, Any] = FlaxViTModelTester(self ) lowerCAmelCase_ :Union[str, Any] = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ :str = model_class(__A ) lowerCAmelCase_ :Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ :Optional[int] = [*signature.parameters.keys()] lowerCAmelCase_ :int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __A ) def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase_ :str = self._prepare_for_class(__A , __A ) lowerCAmelCase_ :Union[str, Any] = model_class(__A ) @jax.jit def model_jitted(__A , **__A ): return model(pixel_values=__A , **__A ) with self.subTest("""JIT Enabled""" ): lowerCAmelCase_ :List[Any] = model_jitted(**__A ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCAmelCase_ :Any = model_jitted(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) ) for jitted_output, output in zip(__A , __A ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: for model_class_name in self.all_model_classes: lowerCAmelCase_ :List[str] = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) lowerCAmelCase_ :List[Any] = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__A )
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Any = """laion/clap-htsat-unfused""" lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp() def __lowerCAmelCase ( self , **__A ) -> List[Any]: return RobertaTokenizer.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self , **__A ) -> Tuple: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self ) -> int: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Optional[Any] = self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 ) lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Dict = self.get_feature_extractor() lowerCAmelCase_ :str = self.get_tokenizer() lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) ) lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" ) lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :List[Any] = """This is a test string""" lowerCAmelCase_ :Dict = processor(text=__A ) lowerCAmelCase_ :List[str] = tokenizer(__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :int = self.get_feature_extractor() lowerCAmelCase_ :Tuple = 
self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase_ :Tuple = processor.batch_decode(__A ) lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
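# A hedged usage sketch of the processor exercised above. It assumes network
# access to the "laion/clap-htsat-unfused" checkpoint; the silent 48 kHz audio
# buffer is illustrative, not part of the test suite.
import numpy as np

from transformers import ClapProcessor

clap_processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
raw_audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
inputs = clap_processor(text=["a dog barking"], audios=raw_audio, return_tensors="pt")
# `inputs` now carries both tokenizer fields (input_ids, attention_mask) and
# feature-extractor fields (input_features) for a CLAP model.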
"""simple docstring""" import sys __UpperCAmelCase = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def _snake_case ( lowercase__ : str ) -> int: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 1 for digit in s: product *= int(lowercase__ ) return product def _snake_case ( lowercase__ : str = N ) -> int: '''simple docstring''' lowerCAmelCase_ :Dict = -sys.maxsize - 1 lowerCAmelCase_ :Optional[int] = n[:1_3] lowerCAmelCase_ :Any = 1_3 while cur_index < len(lowercase__ ) - 1_3: if int(n[cur_index] ) >= int(substr[0] ): lowerCAmelCase_ :Dict = substr[1:] + n[cur_index] cur_index += 1 else: lowerCAmelCase_ :List[Any] = max(lowercase__ , str_eval(lowercase__ ) ) lowerCAmelCase_ :Optional[Any] = n[cur_index : cur_index + 1_3] cur_index += 1_3 return largest_product if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" import os from math import logaa def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int: '''simple docstring''' lowerCAmelCase_ :float = 0 lowerCAmelCase_ :Union[str, Any] = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ): lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) ) if x * logaa(lowercase__ ) > largest: lowerCAmelCase_ :Any = x * logaa(lowercase__ ) lowerCAmelCase_ :List[Any] = i + 1 return result if __name__ == "__main__": print(solution())
"""simple docstring""" import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets __UpperCAmelCase = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' __UpperCAmelCase = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' __UpperCAmelCase = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _SCREAMING_SNAKE_CASE ( datasets.Metric ): def __lowerCAmelCase ( self ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , ) def __lowerCAmelCase ( self , __A , __A ) -> int: lowerCAmelCase_ :Dict = 0.0 for i, j in zip(__A , __A ): n_correct += 1.0 if math_equivalence.is_equiv(__A , __A ) else 0.0 lowerCAmelCase_ :List[Any] = n_correct / len(__A ) return { "accuracy": accuracy, }
"""simple docstring""" import itertools import math def _snake_case ( lowercase__ : int ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = 2 while True: if is_prime(lowercase__ ): yield num num += 1 def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> int: '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , lowercase__ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=4 , ) -> int: lowerCAmelCase_ :Optional[Any] = parent lowerCAmelCase_ :Any = batch_size lowerCAmelCase_ :Optional[Any] = seq_length lowerCAmelCase_ :Optional[int] = is_training lowerCAmelCase_ :Optional[Any] = use_attention_mask lowerCAmelCase_ :Optional[int] = use_token_type_ids lowerCAmelCase_ :int = use_labels lowerCAmelCase_ :Union[str, Any] = vocab_size lowerCAmelCase_ :Optional[int] = hidden_size lowerCAmelCase_ :Tuple = num_hidden_layers lowerCAmelCase_ :Tuple = num_attention_heads lowerCAmelCase_ :Dict = intermediate_size lowerCAmelCase_ :Tuple = hidden_act lowerCAmelCase_ :Optional[Any] = hidden_dropout_prob lowerCAmelCase_ :Optional[Any] = attention_probs_dropout_prob lowerCAmelCase_ :Dict = max_position_embeddings lowerCAmelCase_ :Optional[Any] = type_vocab_size lowerCAmelCase_ :str = type_sequence_label_size lowerCAmelCase_ :List[str] = initializer_range lowerCAmelCase_ :int = num_choices def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ :Dict = None if self.use_attention_mask: lowerCAmelCase_ :Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ :str = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__A , ) return config, input_ids, attention_mask def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Tuple = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[str] = config_and_inputs lowerCAmelCase_ :Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Tuple = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :str = FlaxDistilBertModelTester(self ) @slow def __lowerCAmelCase ( self ) -> Dict: for model_class_name in self.all_model_classes: lowerCAmelCase_ :Dict = model_class_name.from_pretrained("""distilbert-base-uncased""" ) lowerCAmelCase_ :str = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A ) 
@require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :List[str] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) lowerCAmelCase_ :List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCAmelCase_ :List[str] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowerCAmelCase_ :List[Any] = model(__A , attention_mask=__A )[0] lowerCAmelCase_ :List[Any] = (1, 11, 768) self.assertEqual(output.shape , __A ) lowerCAmelCase_ :Tuple = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) )
"""simple docstring""" def _snake_case ( lowercase__ : int = 5_0 ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" __UpperCAmelCase = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } def _snake_case ( lowercase__ : dict , lowercase__ : Union[str, Any] , lowercase__ : int ) -> list[str]: '''simple docstring''' lowerCAmelCase_ :str = set() # keep track of all the paths to be checked lowerCAmelCase_ :Tuple = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue lowerCAmelCase_ :List[str] = queue.pop(0 ) # get the last node from the path lowerCAmelCase_ :Any = path[-1] if node not in explored: lowerCAmelCase_ :str = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: lowerCAmelCase_ :Tuple = list(lowercase__ ) new_path.append(lowercase__ ) queue.append(lowercase__ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(lowercase__ ) # in case there's no path between the 2 nodes return [] def _snake_case ( lowercase__ : dict , lowercase__ : Union[str, Any] , lowercase__ : int ) -> int: '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 lowerCAmelCase_ :Any = [start] lowerCAmelCase_ :List[Any] = set(lowercase__ ) # Keep tab on distances from `start` node. lowerCAmelCase_ :List[str] = {start: 0, target: -1} while queue: lowerCAmelCase_ :Tuple = queue.pop(0 ) if node == target: lowerCAmelCase_ :str = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(lowercase__ ) queue.append(lowercase__ ) lowerCAmelCase_ :Optional[Any] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCAmelCase_ :List[Any] = CLIPTextModel(__A ) lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Union[str, Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Tuple = torch.manual_seed(__A ) else: lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A ) 
lowerCAmelCase_ :List[Any] = 2 lowerCAmelCase_ :int = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ) lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> int: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__A ): if isinstance(__A , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
lowerCAmelCase_ :str = CLIPTextModel(__A ) lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase_ :List[Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> str: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A ) else: lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A ) lowerCAmelCase_ :Optional[Any] = 2 lowerCAmelCase_ :Optional[int] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), ] lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[str] = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) lowerCAmelCase_ :Union[str, Any] = 1_0.0 lowerCAmelCase_ :Union[str, Any] = 4 lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A ) lowerCAmelCase_ :List[str] = steps lowerCAmelCase_ :int = scale lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0] lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = steps lowerCAmelCase_ :str = scale lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = steps lowerCAmelCase_ :Union[str, Any] = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = steps lowerCAmelCase_ :Tuple = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def __lowerCAmelCase ( self ) -> Dict: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> Optional[int]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :str = self.get_dummy_components() 
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__A ) except NotImplementedError: pass @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase_ :List[Any] = """evil space-punk bird""" lowerCAmelCase_ :List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) lowerCAmelCase_ :int = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) lowerCAmelCase_ :Union[str, Any] = pipe( __A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) lowerCAmelCase_ :Tuple = output.images[0] assert image.shape == (512, 512, 3) lowerCAmelCase_ :Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
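# The control_guidance_start/end arguments exercised above window the
# ControlNet conditioning over the denoising schedule: a fraction pair (s, e)
# applies the ControlNet only to the timesteps whose position in the schedule
# falls inside [s, e]. A minimal numeric sketch of that windowing; the
# keep-mask formula is an assumption about the pipeline's internals, not a
# quote of them.
num_steps = 10
start, end = 0.1, 0.7
keeps = [
    1.0 - float(i / num_steps < start or (i + 1) / num_steps > end)
    for i in range(num_steps)
]
assert keeps == [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]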
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) __UpperCAmelCase = logging.getLogger(__name__) @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase_ :Optional[str] = field( default=A__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) UpperCAmelCase_ :bool = field(default=A__ , metadata={"help": "Whether tp freeze the encoder."} ) UpperCAmelCase_ :bool = field(default=A__ , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCAmelCase_ :Optional[str] = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) UpperCAmelCase_ :Optional[int] = field( default=1024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCAmelCase_ :Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCAmelCase_ :Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) UpperCAmelCase_ :Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCAmelCase_ :Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) UpperCAmelCase_ :Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) UpperCAmelCase_ :Optional[int] = field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) UpperCAmelCase_ :Optional[str] = field(default=A__ , metadata={"help": "Source language id for translation."} ) UpperCAmelCase_ :Optional[str] = field(default=A__ , metadata={"help": "Target language id for translation."} ) UpperCAmelCase_ :Optional[int] = field(default=A__ , metadata={"help": "# num_beams to use for evaluation."} ) UpperCAmelCase_ :bool = field( default=A__ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(lowercase__ , os.path.join(lowercase__ , f"""{split}_results.json""" ) ) def _snake_case ( ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Any = parser.parse_args_into_dataclasses() check_output_dir(lowercase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , lowercase__ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowerCAmelCase_ :Union[str, Any] = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(lowercase__ , lowercase__ , lowercase__ ): assert hasattr(lowercase__ , lowercase__ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(lowercase__ , lowercase__ , getattr(lowercase__ , lowercase__ ) ) lowerCAmelCase_ :Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowerCAmelCase_ :Tuple = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase__ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(lowercase__ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: lowerCAmelCase_ :Dict = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowercase__ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = tokenizer.lang_code_to_id[data_args.tgt_lang] else: lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowercase__ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) lowerCAmelCase_ :Dict = SeqaSeqDataset # Get datasets lowerCAmelCase_ :Tuple = ( dataset_class( lowercase__ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) lowerCAmelCase_ :List[str] = ( dataset_class( lowercase__ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) lowerCAmelCase_ :Any = ( dataset_class( lowercase__ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer lowerCAmelCase_ :Any = ( build_compute_metrics_fn(data_args.task , lowercase__ ) if training_args.predict_with_generate else None ) lowerCAmelCase_ :Tuple = SeqaSeqTrainer( model=lowercase__ , args=lowercase__ , data_args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , data_collator=SeqaSeqDataCollator( lowercase__ , lowercase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase__ , tokenizer=lowercase__ , ) lowerCAmelCase_ :Union[str, Any] = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) lowerCAmelCase_ :Any = trainer.train( 
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) lowerCAmelCase_ :Any = train_result.metrics lowerCAmelCase_ :Union[str, Any] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , lowercase__ , training_args.output_dir ) all_metrics.update(lowercase__ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowerCAmelCase_ :Any = trainer.evaluate(metric_key_prefix="""val""" ) lowerCAmelCase_ :List[Any] = data_args.n_val lowerCAmelCase_ :Tuple = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , lowercase__ , training_args.output_dir ) all_metrics.update(lowercase__ ) if training_args.do_predict: logger.info("""*** Predict ***""" ) lowerCAmelCase_ :Optional[int] = trainer.predict(test_dataset=lowercase__ , metric_key_prefix="""test""" ) lowerCAmelCase_ :Union[str, Any] = test_output.metrics lowerCAmelCase_ :Dict = data_args.n_test if trainer.is_world_process_zero(): lowerCAmelCase_ :List[str] = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , lowercase__ , training_args.output_dir ) all_metrics.update(lowercase__ ) if training_args.predict_with_generate: lowerCAmelCase_ :Union[str, Any] = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ ) lowerCAmelCase_ :str = lmap(str.strip , lowercase__ ) write_txt_file(lowercase__ , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(lowercase__ , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def _snake_case ( lowercase__ : Tuple ) -> Dict: '''simple docstring''' main() if __name__ == "__main__": main()
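# The fine-tuning script above routes all configuration through
# HfArgumentParser. A minimal, hedged sketch of that pattern with a made-up
# dataclass; the field names below are illustrative, not the script's own.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class DemoArguments:
    model_name_or_path: str = field(metadata={"help": "Checkpoint to load."})
    max_source_length: int = field(default=1024, metadata={"help": "Input length cap."})


demo_parser = HfArgumentParser(DemoArguments)
(demo_args,) = demo_parser.parse_args_into_dataclasses(args=["--model_name_or_path", "t5-small"])
assert demo_args.max_source_length == 1024  # the default applies when the flag is omitted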
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ): UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :List[str] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" f""" `n_embd`: {n_embd} are not equal.""" ) lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim lowerCAmelCase_ :str = prefix_hidden_dim lowerCAmelCase_ :str = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :List[Any] = ( nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :Any = GPTaConfig( vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , ) lowerCAmelCase_ :Any = GPTaLMHeadModel(__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]: lowerCAmelCase_ :str = self.transformer.transformer.wte(__A ) lowerCAmelCase_ :Any = self.encode_prefix(__A ) lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A ) lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor: return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: return self.encode_prefix(__A ) @torch.no_grad() def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]: lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 ) lowerCAmelCase_ :Optional[int] = [] lowerCAmelCase_ :List[str] = [] for feature in features: lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature # Only support beam search for now lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam( input_embeds=__A , device=__A , eos_token_id=__A ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) lowerCAmelCase_ :Tuple = torch.stack(__A ) lowerCAmelCase_ :int = torch.stack(__A ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, 
Any]: lowerCAmelCase_ :Optional[int] = eos_token_id lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int ) lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool ) if input_embeds is not None: lowerCAmelCase_ :List[str] = input_embeds else: lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A ) for i in range(__A ): lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A ) lowerCAmelCase_ :str = outputs.logits lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) lowerCAmelCase_ :Dict = logits.softmax(-1 ).log() if scores is None: lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 ) lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: lowerCAmelCase_ :List[str] = next_tokens else: lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] ) lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 ) else: lowerCAmelCase_ :List[Any] = -float(np.inf ) lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Optional[int] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None] lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 ) lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1] lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source] lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1] lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 ) lowerCAmelCase_ :str = tokens[next_tokens_source] lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 ) lowerCAmelCase_ :Dict = generated[next_tokens_source] lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source] lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 ) lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze() if is_stopped.all(): break lowerCAmelCase_ :str = scores / seq_lengths lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A ) # tokens tensors are already padded to max_seq_length lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order] lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 ) lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
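# The beam search above ranks candidates by scores_sum / seq_lengths, i.e. by
# average log-probability per token, so longer beams are not penalized merely
# for accumulating more negative log terms. A tiny numeric sketch of why that
# normalization matters (values are illustrative):
import numpy as np

short_beam = np.array([-0.5, -0.6])             # total -1.1 over 2 tokens
long_beam = np.array([-0.5, -0.6, -0.4, -0.3])  # total -1.8 over 4 tokens
assert long_beam.sum() < short_beam.sum()    # a raw sum would favor the short beam
assert long_beam.mean() > short_beam.mean()  # the per-token average favors the long one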
"""simple docstring""" from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict = DistilBertTokenizer UpperCAmelCase_ :List[str] = DistilBertTokenizerFast UpperCAmelCase_ :Optional[int] = True @slow def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A ) lowerCAmelCase_ :Any = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A ) lowerCAmelCase_ :str = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :Dict = tokenizer.build_inputs_with_special_tokens(__A , __A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "detr" UpperCAmelCase_ :str = ["past_key_values"] UpperCAmelCase_ :Tuple = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(__A , __A ): lowerCAmelCase_ :str = backbone_config.get("""model_type""" ) lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A ) # set timm attributes to None lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None lowerCAmelCase_ :Tuple = use_timm_backbone lowerCAmelCase_ :Optional[int] = backbone_config lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :int = num_queries lowerCAmelCase_ :List[Any] = d_model lowerCAmelCase_ :Optional[int] = encoder_ffn_dim lowerCAmelCase_ :Tuple = encoder_layers lowerCAmelCase_ :int = encoder_attention_heads lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim lowerCAmelCase_ :List[str] = decoder_layers lowerCAmelCase_ :Dict = decoder_attention_heads lowerCAmelCase_ :Dict = dropout lowerCAmelCase_ :Tuple = attention_dropout lowerCAmelCase_ :Union[str, Any] = activation_dropout lowerCAmelCase_ :Any = activation_function lowerCAmelCase_ :List[str] = init_std lowerCAmelCase_ :Optional[int] = init_xavier_std lowerCAmelCase_ :int = encoder_layerdrop lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop lowerCAmelCase_ :List[str] = encoder_layers lowerCAmelCase_ :Union[str, Any] = auxiliary_loss lowerCAmelCase_ :str = position_embedding_type lowerCAmelCase_ :List[Any] = backbone lowerCAmelCase_ :str = use_pretrained_backbone lowerCAmelCase_ :str = dilation # Hungarian matcher lowerCAmelCase_ :List[Any] = class_cost lowerCAmelCase_ :Union[str, Any] = bbox_cost lowerCAmelCase_ :Tuple = giou_cost # Loss coefficients lowerCAmelCase_ :Optional[int] = mask_loss_coefficient lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient lowerCAmelCase_ :Tuple = bbox_loss_coefficient lowerCAmelCase_ :Tuple = giou_loss_coefficient lowerCAmelCase_ :Dict = eos_coefficient super().__init__(is_encoder_decoder=__A , **__A ) @property def __lowerCAmelCase ( self ) -> int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) -> int: return self.d_model @classmethod 
def __lowerCAmelCase ( cls , __A , **__A ) -> Any: return cls(backbone_config=__A , **__A ) def __lowerCAmelCase ( self ) -> Dict[str, any]: lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowerCAmelCase_ :Dict = self.backbone_config.to_dict() lowerCAmelCase_ :str = self.__class__.model_type return output class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[Any] = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-5 @property def __lowerCAmelCase ( self ) -> int: return 12
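# The DETR configuration above aliases generic names onto DETR-specific ones
# via `attribute_map` and the two properties. A hedged sketch of the effect,
# with default values read off the signature above:
from transformers import DetrConfig

detr_config = DetrConfig()
assert detr_config.hidden_size == detr_config.d_model  # 256 by default
assert detr_config.num_attention_heads == detr_config.encoder_attention_heads  # 8 by default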
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCAmelCase_ :List[Any] = CLIPTextModel(__A ) lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Union[str, Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Tuple = torch.manual_seed(__A ) else: lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A ) 
lowerCAmelCase_ :List[Any] = 2 lowerCAmelCase_ :int = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ) lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> int: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__A ): if isinstance(__A , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
lowerCAmelCase_ :str = CLIPTextModel(__A ) lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase_ :List[Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> str: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A ) else: lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A ) lowerCAmelCase_ :Optional[Any] = 2 lowerCAmelCase_ :Optional[int] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), ] lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[str] = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) lowerCAmelCase_ :Union[str, Any] = 1_0.0 lowerCAmelCase_ :Union[str, Any] = 4 lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A ) lowerCAmelCase_ :List[str] = steps lowerCAmelCase_ :int = scale lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0] lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = steps lowerCAmelCase_ :str = scale lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = steps lowerCAmelCase_ :Union[str, Any] = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = steps lowerCAmelCase_ :Tuple = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def __lowerCAmelCase ( self ) -> Dict: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> Optional[int]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :str = self.get_dummy_components() 
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__A ) except NotImplementedError: pass @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase_ :List[Any] = """evil space-punk bird""" lowerCAmelCase_ :List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) lowerCAmelCase_ :int = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) lowerCAmelCase_ :Union[str, Any] = pipe( __A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) lowerCAmelCase_ :Tuple = output.images[0] assert image.shape == (512, 512, 3) lowerCAmelCase_ :Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['DeiTFeatureExtractor'] __UpperCAmelCase = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.json'} __UpperCAmelCase = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } __UpperCAmelCase = {'mgp-str': 27} class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Union[str, Any] = VOCAB_FILES_NAMES UpperCAmelCase_ :Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __A , __A="[GO]" , __A="[GO]" , __A="[s]" , __A="[GO]" , **__A ) -> Optional[int]: super().__init__( unk_token=__A , bos_token=__A , eos_token=__A , pad_token=__A , **__A , ) with open(__A , encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase_ :List[Any] = json.load(__A ) lowerCAmelCase_ :List[str] = {v: k for k, v in self.vocab.items()} @property def __lowerCAmelCase ( self ) -> Optional[Any]: return len(self.vocab ) def __lowerCAmelCase ( self ) -> Union[str, Any]: return dict(self.vocab , **self.added_tokens_encoder ) def __lowerCAmelCase ( self , __A ) -> Any: lowerCAmelCase_ :Any = [] for s in text: char_tokens.extend(__A ) return char_tokens def __lowerCAmelCase ( self , __A ) -> List[str]: return self.vocab.get(__A , self.vocab.get(self.unk_token ) ) def __lowerCAmelCase ( self , __A ) -> Any: return self.decoder.get(__A ) def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error("""Vocabulary path ({}) should be a directory""".format(__A ) ) return lowerCAmelCase_ :Dict = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(__A , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=__A , ensure_ascii=__A ) + """\n""" ) return (vocab_file,)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def _snake_case ( lowercase__ : int , lowercase__ : int ) -> int: '''simple docstring''' return int((input_a, input_a).count(0 ) == 0 ) def _snake_case ( ) -> None: '''simple docstring''' assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
"""simple docstring""" __UpperCAmelCase = 2_56 # Modulus to hash a string __UpperCAmelCase = 1_00_00_03 def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool: '''simple docstring''' lowerCAmelCase_ :Tuple = len(lowercase__ ) lowerCAmelCase_ :List[str] = len(lowercase__ ) if p_len > t_len: return False lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :Optional[int] = 0 lowerCAmelCase_ :Any = 1 # Calculating the hash of pattern and substring of text for i in range(lowercase__ ): lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash lowerCAmelCase_ :Any = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def _snake_case ( ) -> None: '''simple docstring''' lowerCAmelCase_ :int = """abc1abc12""" lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc""" lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc""" assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ ) # Test 2) lowerCAmelCase_ :Dict = """ABABX""" lowerCAmelCase_ :int = """ABABZABABYABABX""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 3) lowerCAmelCase_ :Union[str, Any] = """AAAB""" lowerCAmelCase_ :List[str] = """ABAAAAAB""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 4) lowerCAmelCase_ :Dict = """abcdabcy""" lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 5) lowerCAmelCase_ :Optional[int] = """Lรผ""" lowerCAmelCase_ :Optional[int] = """Lรผsai""" assert rabin_karp(lowercase__ , lowercase__ ) lowerCAmelCase_ :Optional[int] = """Lue""" assert not rabin_karp(lowercase__ , lowercase__ ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
"""simple docstring""" def _snake_case ( lowercase__ : float ) -> float: '''simple docstring''' return 1_0 - x * x def _snake_case ( lowercase__ : float , lowercase__ : float ) -> float: '''simple docstring''' if equation(lowercase__ ) * equation(lowercase__ ) >= 0: raise ValueError("""Wrong space!""" ) lowerCAmelCase_ :List[Any] = a while (b - a) >= 0.01: # Find middle point lowerCAmelCase_ :Tuple = (a + b) / 2 # Check if middle point is root if equation(lowercase__ ) == 0.0: break # Decide the side to repeat the steps if equation(lowercase__ ) * equation(lowercase__ ) < 0: lowerCAmelCase_ :List[str] = c else: lowerCAmelCase_ :Optional[Any] = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str: '''simple docstring''' lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : int ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase_ :Optional[Any] = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase_ :List[Any] = 1_6 elif accelerator.mixed_precision != "no": lowerCAmelCase_ :List[str] = 8 else: lowerCAmelCase_ :Optional[int] = None return tokenizer.pad( lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCAmelCase_ :Optional[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :List[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __UpperCAmelCase = mocked_dataloaders # noqa: F811 def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1": lowerCAmelCase_ :Optional[Any] = 2 # New Code # lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps ) lowerCAmelCase_ :int = int(args.local_sgd_steps ) # Initialize accelerator lowerCAmelCase_ :str = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :int = config["""lr"""] lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :int = int(config["""seed"""] ) lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ ) # Instantiate scheduler lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Now we train the model for epoch in range(lowercase__ ): model.train() with LocalSGD( accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(lowercase__ ): lowerCAmelCase_ :str = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = output.loss accelerator.backward(lowercase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) def _snake_case ( ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument( """--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowerCAmelCase_ :Optional[Any] = parser.parse_args() lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
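# Stripped to its core, the LocalSGD pattern used in the script above reduces to
# the standalone sketch below: a toy model and synthetic data stand in for the
# GLUE pipeline, and only Accelerate calls that already appear above are used.
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(64, 4), torch.randn(64, 1)), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for x, y in dataloader:
        with accelerator.accumulate(model):
            loss = torch.nn.functional.mse_loss(model(x), y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        # synchronize parameters across workers every `local_sgd_steps` batches
        local_sgd.step()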
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'caidas/swin2sr-classicalsr-x2-64': ( 'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json' ), } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict = "swin2sr" UpperCAmelCase_ :Tuple = { "hidden_size": "embed_dim", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , __A=64 , __A=1 , __A=3 , __A=180 , __A=[6, 6, 6, 6, 6, 6] , __A=[6, 6, 6, 6, 6, 6] , __A=8 , __A=2.0 , __A=True , __A=0.0 , __A=0.0 , __A=0.1 , __A="gelu" , __A=False , __A=0.0_2 , __A=1E-5 , __A=2 , __A=1.0 , __A="1conv" , __A="pixelshuffle" , **__A , ) -> Union[str, Any]: super().__init__(**__A ) lowerCAmelCase_ :List[str] = image_size lowerCAmelCase_ :Union[str, Any] = patch_size lowerCAmelCase_ :Dict = num_channels lowerCAmelCase_ :Any = embed_dim lowerCAmelCase_ :List[Any] = depths lowerCAmelCase_ :Tuple = len(__A ) lowerCAmelCase_ :int = num_heads lowerCAmelCase_ :Optional[int] = window_size lowerCAmelCase_ :str = mlp_ratio lowerCAmelCase_ :List[Any] = qkv_bias lowerCAmelCase_ :Tuple = hidden_dropout_prob lowerCAmelCase_ :str = attention_probs_dropout_prob lowerCAmelCase_ :Optional[int] = drop_path_rate lowerCAmelCase_ :Tuple = hidden_act lowerCAmelCase_ :int = use_absolute_embeddings lowerCAmelCase_ :Dict = layer_norm_eps lowerCAmelCase_ :Optional[Any] = initializer_range lowerCAmelCase_ :List[str] = upscale lowerCAmelCase_ :Union[str, Any] = img_range lowerCAmelCase_ :int = resi_connection lowerCAmelCase_ :Dict = upsampler
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : List[str] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase_ :str = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" ) return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. lowerCAmelCase_ :Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :Any = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' model.eval() lowerCAmelCase_ :Dict = 0 for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowercase__ ) - 1: lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Tuple = metric.compute() return eval_metric["accuracy"] def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any: '''simple docstring''' lowerCAmelCase_ :Optional[int] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :int = config["""lr"""] lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :Optional[int] = int(config["""seed"""] ) lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase_ :Optional[Any] = args.model_name_or_path set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ ) # Instantiate optimizer lowerCAmelCase_ :List[str] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowerCAmelCase_ :Any = 1 lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , ) else: lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase_ :List[str] = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase_ :List[Any] = 0 lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" ) lowerCAmelCase_ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: lowerCAmelCase_ :Dict = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1] lowerCAmelCase_ :int = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1 lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) accelerator.print("""resumed checkpoint performance:""" , lowercase__ ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f: lowerCAmelCase_ :List[str] = json.load(lowercase__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model lowerCAmelCase_ :List[Any] = {} for epoch in range(lowercase__ , lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Dict = outputs.loss lowerCAmelCase_ :int = loss / gradient_accumulation_steps accelerator.backward(lowercase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 lowerCAmelCase_ :List[str] = f"""epoch_{epoch}""" lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ ) accelerator.save_state(lowercase__ ) lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = accuracy lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0] lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""] lowerCAmelCase_ :List[Any] = epoch lowerCAmelCase_ :Tuple = overall_step accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) def _snake_case ( ) -> int: '''simple docstring''' lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , ) parser.add_argument( 
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , ) lowerCAmelCase_ :Optional[int] = parser.parse_args() lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __UpperCAmelCase = Lock() def _snake_case ( lowercase__ : str , lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ) -> List[str]: '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(lowercase__ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowerCAmelCase_ :Dict = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowerCAmelCase_ :Optional[int] = min(lowercase__ , lowercase__ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(lowercase__ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowerCAmelCase_ :List[str] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowerCAmelCase_ :Optional[int] = max(lowercase__ , lowercase__ ) # after all swaps are performed, send the values back to main result_pipe[1].send(lowercase__ ) def _snake_case ( lowercase__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :str = [] lowerCAmelCase_ :Any = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowerCAmelCase_ :int = Pipe() lowerCAmelCase_ :Any = Pipe() process_array_.append( Process( target=lowercase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowerCAmelCase_ :Union[str, Any] = temp_rs lowerCAmelCase_ :Optional[Any] = temp_rr for i in range(1 , len(lowercase__ ) - 1 ): lowerCAmelCase_ :Any = Pipe() lowerCAmelCase_ :int = Pipe() process_array_.append( Process( target=lowercase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowerCAmelCase_ :Dict = temp_rs lowerCAmelCase_ :Any = temp_rr process_array_.append( Process( target=lowercase__ , args=( len(lowercase__ ) - 1, arr[len(lowercase__ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(lowercase__ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(lowercase__ ) ): lowerCAmelCase_ :Dict = result_pipe[p][0].recv() process_array_[p].join() return arr def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Any = list(range(1_0 , 0 , -1 ) ) print("""Initial List""" ) print(*lowercase__ ) lowerCAmelCase_ :Tuple = odd_even_transposition(lowercase__ ) print("""Sorted List\n""" ) print(*lowercase__ ) if __name__ == "__main__": main()
"""simple docstring""" import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Union[str, Any]: if isinstance(__A , __A ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden lowerCAmelCase_ :Tuple = deepcopy(__A ) elif os.path.exists(__A ): with io.open(__A , """r""" , encoding="""utf-8""" ) as f: lowerCAmelCase_ :str = json.load(__A ) else: try: lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" ) lowerCAmelCase_ :int = json.loads(__A ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) lowerCAmelCase_ :Optional[Any] = config self.set_stage_and_offload() def __lowerCAmelCase ( self ) -> Tuple: # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 ) # offload lowerCAmelCase_ :Dict = False if self.is_zeroa() or self.is_zeroa(): lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] ) lowerCAmelCase_ :Union[str, Any] = set( [ self.get_value("""zero_optimization.offload_optimizer.device""" ), self.get_value("""zero_optimization.offload_param.device""" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: lowerCAmelCase_ :Optional[int] = True def __lowerCAmelCase ( self , __A ) -> Optional[Any]: lowerCAmelCase_ :str = self.config # find the config node of interest if it exists lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" ) lowerCAmelCase_ :List[str] = nodes.pop() for node in nodes: lowerCAmelCase_ :Tuple = config.get(__A ) if config is None: return None, ds_key return config, ds_key def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A ) if config is None: return default return config.get(__A , __A ) def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]: lowerCAmelCase_ :Tuple = self.config # find the config node of interest if it exists lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" ) for node in nodes: lowerCAmelCase_ :int = config lowerCAmelCase_ :Any = config.get(__A ) if config is None: if must_exist: raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(__A ) def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = self.get_value(__A ) return False if value is None else bool(__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[str] = self.get_value(__A ) return False if value is None else not bool(__A ) def __lowerCAmelCase ( self ) -> str: return self._stage == 2 def __lowerCAmelCase ( self ) -> Union[str, Any]: return self._stage == 3 def __lowerCAmelCase ( self ) -> Union[str, Any]: return self._offload class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Optional[int]: lowerCAmelCase_ :Dict = engine def __lowerCAmelCase ( self 
, __A , **__A ) -> str: # runs backpropagation and handles mixed precision self.engine.backward(__A , **__A ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> List[str]: super().__init__(__A , device_placement=__A , scaler=__A ) lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" ) def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def __lowerCAmelCase ( self ) -> List[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def __lowerCAmelCase ( self ) -> int: if self.__has_overflow__: return self.optimizer.overflow return False class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A , __A ) -> Optional[int]: super().__init__(__A , __A ) def __lowerCAmelCase ( self ) -> Any: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]: lowerCAmelCase_ :str = params lowerCAmelCase_ :Any = lr lowerCAmelCase_ :List[Any] = weight_decay lowerCAmelCase_ :Any = kwargs class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]: lowerCAmelCase_ :Optional[int] = optimizer lowerCAmelCase_ :int = total_num_steps lowerCAmelCase_ :List[Any] = warmup_num_steps lowerCAmelCase_ :int = kwargs
"""simple docstring""" from collections.abc import Generator def _snake_case ( ) -> Generator[int, None, None]: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = 0, 1 while True: lowerCAmelCase_ , lowerCAmelCase_ :List[str] = b, a + b yield b def _snake_case ( lowercase__ : int = 1_0_0_0 ) -> int: '''simple docstring''' lowerCAmelCase_ :Dict = 1 lowerCAmelCase_ :int = fibonacci_generator() while len(str(next(lowercase__ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined" UpperCAmelCase_ :List[Any] = "image_segmenter" UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation UpperCAmelCase_ :Tuple = ["image", "text"] UpperCAmelCase_ :Dict = ["image"] def __init__( self , *__A , **__A ) -> Optional[Any]: requires_backends(self , ["""vision"""] ) super().__init__(*__A , **__A ) def __lowerCAmelCase ( self , __A , __A ) -> Any: return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" ) def __lowerCAmelCase ( self , __A ) -> Tuple: with torch.no_grad(): lowerCAmelCase_ :Dict = self.model(**__A ).logits return logits def __lowerCAmelCase ( self , __A ) -> Tuple: lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy() lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :str = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
"""simple docstring""" import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) __UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :str = field( default=A__ , metadata={"help": "Model type selected in the list: " + ", ".join(A__ )} ) UpperCAmelCase_ :str = field( default=A__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} ) UpperCAmelCase_ :int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCAmelCase_ :int = field( default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) UpperCAmelCase_ :int = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) UpperCAmelCase_ :int = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) UpperCAmelCase_ :bool = field( default=A__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) UpperCAmelCase_ :bool = field( default=A__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} ) UpperCAmelCase_ :float = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) UpperCAmelCase_ :int = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) UpperCAmelCase_ :int = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) UpperCAmelCase_ :int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} ) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[Any] = "train" UpperCAmelCase_ :Dict = "dev" class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :SquadDataTrainingArguments UpperCAmelCase_ :List[SquadFeatures] UpperCAmelCase_ :Split UpperCAmelCase_ :bool def __init__( self , __A , __A , __A = None , __A = Split.train , __A = False , __A = None , __A = "pt" , ) -> int: lowerCAmelCase_ :List[Any] = args lowerCAmelCase_ :Any = is_language_sensitive lowerCAmelCase_ :Optional[Any] = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__A , __A ): try: lowerCAmelCase_ :Tuple = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) lowerCAmelCase_ :Optional[int] = mode # Load data features from cache or dataset file lowerCAmelCase_ :List[str] = """v2""" if args.version_2_with_negative else """v1""" lowerCAmelCase_ :int = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCAmelCase_ :Union[str, Any] = cached_features_file + """.lock""" with FileLock(__A ): if os.path.exists(__A ) and not args.overwrite_cache: lowerCAmelCase_ :Dict = time.time() lowerCAmelCase_ :str = torch.load(__A ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCAmelCase_ :str = self.old_features["""features"""] lowerCAmelCase_ :Tuple = self.old_features.get("""dataset""" , __A ) lowerCAmelCase_ :List[Any] = self.old_features.get("""examples""" , __A ) logger.info( f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in""" """ future run""" ) else: if mode == Split.dev: lowerCAmelCase_ :Any = self.processor.get_dev_examples(args.data_dir ) else: lowerCAmelCase_ :str = self.processor.get_train_examples(args.data_dir ) lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = squad_convert_examples_to_features( examples=self.examples , tokenizer=__A , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__A , ) lowerCAmelCase_ :str = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , __A , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self ) -> Union[str, Any]: return len(self.features ) def __getitem__( self , __A ) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset lowerCAmelCase_ :int = self.features[i] lowerCAmelCase_ :List[Any] = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCAmelCase_ :Tuple = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCAmelCase_ :Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCAmelCase_ :Optional[int] = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCAmelCase_ :List[str] = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCAmelCase_ :List[str] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCAmelCase_ :Optional[Any] = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCAmelCase_ :Optional[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCAmelCase_ :List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
"""simple docstring""" def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int: '''simple docstring''' if index == number_of_items: return 0 lowerCAmelCase_ :Any = 0 lowerCAmelCase_ :str = 0 lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 ) if weights[index] <= max_weight: lowerCAmelCase_ :str = values[index] + knapsack( lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 ) return max(lowercase__ , lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'spm_char.model'} __UpperCAmelCase = { 'vocab_file': { 'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model', 'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model', 'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model', } } __UpperCAmelCase = { 'microsoft/speecht5_asr': 10_24, 'microsoft/speecht5_tts': 10_24, 'microsoft/speecht5_vc': 10_24, } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict = VOCAB_FILES_NAMES UpperCAmelCase_ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ :Optional[Any] = ["input_ids", "attention_mask"] def __init__( self , __A , __A="<s>" , __A="</s>" , __A="<unk>" , __A="<pad>" , __A = None , **__A , ) -> None: lowerCAmelCase_ :str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__A , eos_token=__A , unk_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) lowerCAmelCase_ :Dict = vocab_file lowerCAmelCase_ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__A ) @property def __lowerCAmelCase ( self ) -> List[str]: return self.sp_model.get_piece_size() def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :str = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Any: lowerCAmelCase_ :int = self.__dict__.copy() lowerCAmelCase_ :Optional[Any] = None return state def __setstate__( self , __A ) -> Any: lowerCAmelCase_ :Dict = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase_ :Any = {} lowerCAmelCase_ :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self , __A ) -> List[str]: return self.sp_model.encode(__A , out_type=__A ) def __lowerCAmelCase ( self , __A ) -> str: return self.sp_model.piece_to_id(__A ) def __lowerCAmelCase ( self , __A ) -> List[Any]: lowerCAmelCase_ :str = self.sp_model.IdToPiece(__A ) return token def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: lowerCAmelCase_ :Optional[Any] = [] lowerCAmelCase_ :List[Any] = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__A ) + token lowerCAmelCase_ :Tuple = [] else: current_sub_tokens.append(__A ) out_string += self.sp_model.decode(__A ) return out_string.strip() def __lowerCAmelCase ( self , __A , __A=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) lowerCAmelCase_ :Optional[int] = [1] if token_ids_a is None: return ([0] * len(__A )) + suffix_ones return ([0] * len(__A 
)) + ([0] * len(__A )) + suffix_ones def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase_ :Union[str, Any] = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , """wb""" ) as fi: lowerCAmelCase_ :str = self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,)
"""simple docstring""" from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]: '''simple docstring''' if not is_tqdm_available(): raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" ) lowerCAmelCase_ :Tuple = False if main_process_only: lowerCAmelCase_ :Dict = PartialState().local_process_index == 0 return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json', 'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json', 'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json', 'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json', 'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json', 'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json', 'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json', 'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json', 'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json', 'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[str] = "xlm" UpperCAmelCase_ :Any = { "hidden_size": "emb_dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", "n_words": "vocab_size", # For backward compatibility } def __init__( self , __A=3_0145 , __A=2048 , __A=12 , __A=16 , __A=0.1 , __A=0.1 , __A=True , __A=False , __A=False , __A=False , __A=1 , __A=True , __A=512 , __A=2048**-0.5 , __A=1E-12 , __A=0.0_2 , __A=0 , __A=1 , __A=2 , __A=3 , __A=5 , __A=True , __A="first" , __A=True , __A=None , __A=True , __A=0.1 , __A=5 , __A=5 , __A=0 , __A=0 , __A=2 , __A=0 , **__A , ) -> Union[str, Any]: lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :Tuple = emb_dim lowerCAmelCase_ :int = n_layers lowerCAmelCase_ :Any = n_heads lowerCAmelCase_ :Optional[int] = dropout lowerCAmelCase_ :Optional[Any] = attention_dropout lowerCAmelCase_ :str = gelu_activation lowerCAmelCase_ :int = sinusoidal_embeddings lowerCAmelCase_ :List[str] = causal lowerCAmelCase_ :str = asm lowerCAmelCase_ :Tuple = n_langs lowerCAmelCase_ :List[str] = use_lang_emb lowerCAmelCase_ :Optional[Any] = layer_norm_eps lowerCAmelCase_ :Optional[Any] = bos_index lowerCAmelCase_ :List[Any] = eos_index lowerCAmelCase_ :Tuple = pad_index lowerCAmelCase_ :Dict = unk_index lowerCAmelCase_ :List[str] = mask_index lowerCAmelCase_ :Optional[int] = is_encoder lowerCAmelCase_ :Optional[Any] = max_position_embeddings lowerCAmelCase_ :Dict = embed_init_std lowerCAmelCase_ :str = init_std lowerCAmelCase_ :Dict = summary_type lowerCAmelCase_ :Tuple = summary_use_proj lowerCAmelCase_ :str = summary_activation lowerCAmelCase_ :Union[str, Any] = summary_proj_to_labels lowerCAmelCase_ :Optional[int] = summary_first_dropout lowerCAmelCase_ :int = start_n_top lowerCAmelCase_ :Tuple = end_n_top lowerCAmelCase_ :int = mask_token_id lowerCAmelCase_ :Optional[Any] = lang_id if "n_words" in kwargs: lowerCAmelCase_ :Union[str, Any] = kwargs["""n_words"""] super().__init__(pad_token_id=__A , bos_token_id=__A , **__A ) class _SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ :Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowerCAmelCase_ :int = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), 
("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json') class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = 0 def __lowerCAmelCase ( self ) -> List[str]: self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" ) os.makedirs(__A , exist_ok=__A ) with open(os.path.join(__A , """config.json""" ) , """w""" ) as f: f.write(json.dumps({} ) ) lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertEqual(type(__A ) , __A ) def __lowerCAmelCase ( self ) -> Optional[int]: try: AutoConfig.register("""custom""" , __A ) # Wrong model type will raise an error with self.assertRaises(__A ): AutoConfig.register("""model""" , __A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoConfig.register("""bert""" , __A ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCAmelCase_ :Union[str, Any] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def __lowerCAmelCase ( self ) -> Tuple: with self.assertRaisesRegex( __A , """bert-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" ) def __lowerCAmelCase ( self ) -> Any: with self.assertRaisesRegex( __A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" ) def __lowerCAmelCase ( self ) -> int: with self.assertRaisesRegex( __A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ): lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def __lowerCAmelCase ( self ) -> 
Tuple: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__A ): lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__A ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A ) self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" ) def __lowerCAmelCase ( self ) -> int: class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :int = "new-model" try: AutoConfig.register("""new-model""" , __A ) # If remote code is not set, the default is to use local lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
"""simple docstring""" import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :str = GPTSanJapaneseTokenizer UpperCAmelCase_ :Optional[int] = False UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False} def __lowerCAmelCase ( self ) -> Tuple: super().setUp() # fmt: off lowerCAmelCase_ :Dict = ["""ใ“ใ‚“""", """ใ“ใ‚“ใซ""", """ใซใกใฏ""", """ใฐใ‚“ใฏ""", """ไธ–็•Œ,ใ”บ็•Œ""", """ใ€""", """ใ€‚""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # ๐Ÿ˜€ lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""} lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(__A ) ) def __lowerCAmelCase ( self , **__A ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> Dict: lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" return input_text, output_text def __lowerCAmelCase ( self , __A ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A ) lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A ) lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A ) return text, ids def __lowerCAmelCase ( self ) -> str: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> int: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚""" lowerCAmelCase_ :Any = ["""ใ“ใ‚“""", """ใซใกใฏ""", """ใ€""", """ไธ–็•Œ""", """ใ€‚""", """<SP>""", """ใ“ใ‚“""", """ใฐใ‚“ใฏ""", """ใ€""", """ใ”บ็•Œ""", """ใ€‚"""] lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids without special tokens lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids with special tokens lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token] lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( 
self ) -> Union[str, Any]: lowerCAmelCase_ :int = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚""" lowerCAmelCase_ :str = """ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :str = tokenizer.encode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Any = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text ) lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A ) lowerCAmelCase_ :int = tokenizer.decode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) lowerCAmelCase_ :Tuple = tokenizer.decode(__A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1) lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0] lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1) lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ„ใƒฏ""" , prefix_text="""ใ‚ใƒณ""" ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertNotEqual(__A , __A ) self.assertNotEqual(__A , __A ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = [["""ๆญฆ็”ฐไฟก็Ž„""", """ใฏใ€"""], ["""็น”็”ฐไฟก้•ท""", """ใฎ้…ไธ‹ใฎใ€"""]] lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A ) lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A ) # fmt: off lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]] lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], 
[1, 1, 1, 0, 0, 0, 0, 0]] lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , __A ) self.assertListEqual(x_token.token_type_ids , __A ) self.assertListEqual(x_token.attention_mask , __A ) self.assertListEqual(x_token_a.input_ids , __A ) self.assertListEqual(x_token_a.token_type_ids , __A ) self.assertListEqual(x_token_a.attention_mask , __A ) def __lowerCAmelCase ( self ) -> Tuple: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def __lowerCAmelCase ( self ) -> str: # tokenizer has no padding token pass
"""simple docstring""" def _snake_case ( lowercase__ : int , lowercase__ : int ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) lowerCAmelCase_ :Union[str, Any] = str(bin(lowercase__ ) )[2:] # remove the leading "0b" lowerCAmelCase_ :Dict = str(bin(lowercase__ ) )[2:] lowerCAmelCase_ :List[Any] = max(len(lowercase__ ) , len(lowercase__ ) ) return "0b" + "".join( str(int("""1""" in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __UpperCAmelCase = pd.read_csv( 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/' 'position_salaries.csv' ) __UpperCAmelCase = dataset.iloc[:, 1:2].values __UpperCAmelCase = dataset.iloc[:, 2].values __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0) __UpperCAmelCase = PolynomialFeatures(degree=4) __UpperCAmelCase = poly_reg.fit_transform(X) __UpperCAmelCase = LinearRegression() pol_reg.fit(X_poly, y) def _snake_case ( ) -> str: '''simple docstring''' plt.scatter(lowercase__ , lowercase__ , color="""red""" ) plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _SCREAMING_SNAKE_CASE ( unittest.TestCase , A__ ): def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Dict = load_tool("""text-classification""" ) self.tool.setup() lowerCAmelCase_ :int = load_tool("""text-classification""" , remote=__A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Any = self.tool("""That's quite cool""" , ["""positive""", """negative"""] ) self.assertEqual(__A , """positive""" ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :str = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] ) self.assertEqual(__A , """positive""" ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Dict = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] ) self.assertEqual(__A , """positive""" ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Dict = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] ) self.assertEqual(__A , """positive""" )
"""simple docstring""" from __future__ import annotations __UpperCAmelCase = 1.6021e-19 # units = C def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]: '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif conductivity < 0: raise ValueError("""Conductivity cannot be negative""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative""" ) elif mobility < 0: raise ValueError("""mobility cannot be negative""" ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} __UpperCAmelCase = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } __UpperCAmelCase = { 'allenai/longformer-base-4096': 40_96, 'allenai/longformer-large-4096': 40_96, 'allenai/longformer-large-4096-finetuned-triviaqa': 40_96, 'allenai/longformer-base-4096-extra.pos.embd.only': 40_96, 'allenai/longformer-large-4096-extra.pos.embd.only': 40_96, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _snake_case ( ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""ยก""" ) , ord("""ยฌ""" ) + 1 ) ) + list(range(ord("""ยฎ""" ) , ord("""รฟ""" ) + 1 ) ) ) lowerCAmelCase_ :Optional[int] = bs[:] lowerCAmelCase_ :int = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase__ ) cs.append(2**8 + n ) n += 1 lowerCAmelCase_ :Any = [chr(lowercase__ ) for n in cs] return dict(zip(lowercase__ , lowercase__ ) ) def _snake_case ( lowercase__ : List[str] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = set() lowerCAmelCase_ :List[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase_ :str = char return pairs class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[int] = VOCAB_FILES_NAMES UpperCAmelCase_ :List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ :Optional[Any] = ["input_ids", "attention_mask"] def __init__( self , __A , __A , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , **__A , ) -> int: lowerCAmelCase_ :List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token lowerCAmelCase_ :Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if 
isinstance(__A , __A ) else eos_token lowerCAmelCase_ :List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token lowerCAmelCase_ :List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token lowerCAmelCase_ :str = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token lowerCAmelCase_ :List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ :List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , ) with open(__A , encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase_ :Optional[Any] = json.load(__A ) lowerCAmelCase_ :Optional[int] = {v: k for k, v in self.encoder.items()} lowerCAmelCase_ :List[str] = errors # how to handle errors in decoding lowerCAmelCase_ :Optional[Any] = bytes_to_unicode() lowerCAmelCase_ :List[str] = {v: k for k, v in self.byte_encoder.items()} with open(__A , encoding="""utf-8""" ) as merges_handle: lowerCAmelCase_ :Dict = merges_handle.read().split("""\n""" )[1:-1] lowerCAmelCase_ :Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase_ :Optional[int] = dict(zip(__A , range(len(__A ) ) ) ) lowerCAmelCase_ :Dict = {} lowerCAmelCase_ :Optional[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase_ :Optional[int] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def __lowerCAmelCase ( self ) -> Optional[int]: return len(self.encoder ) def __lowerCAmelCase ( self ) -> Tuple: return dict(self.encoder , **self.added_tokens_encoder ) def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: if token in self.cache: return self.cache[token] lowerCAmelCase_ :Dict = tuple(__A ) lowerCAmelCase_ :Any = get_pairs(__A ) if not pairs: return token while True: lowerCAmelCase_ :Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase_ , lowerCAmelCase_ :Dict = bigram lowerCAmelCase_ :Tuple = [] lowerCAmelCase_ :str = 0 while i < len(__A ): try: lowerCAmelCase_ :Optional[Any] = word.index(__A , __A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase_ :List[Any] = j if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase_ :Union[str, Any] = tuple(__A ) lowerCAmelCase_ :Optional[int] = new_word if len(__A ) == 1: break else: lowerCAmelCase_ :int = get_pairs(__A ) lowerCAmelCase_ :Any = """ """.join(__A ) lowerCAmelCase_ :Dict = word return word def __lowerCAmelCase ( self , __A ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = [] for token in re.findall(self.pat , __A ): lowerCAmelCase_ :List[Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(""" """ ) ) return bpe_tokens def __lowerCAmelCase ( self , __A ) -> Any: return self.encoder.get(__A , 
self.encoder.get(self.unk_token ) ) def __lowerCAmelCase ( self , __A ) -> Tuple: return self.decoder.get(__A ) def __lowerCAmelCase ( self , __A ) -> Optional[Any]: lowerCAmelCase_ :List[Any] = """""".join(__A ) lowerCAmelCase_ :Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase_ :Dict = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :Optional[Any] = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__A , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + """\n""" ) lowerCAmelCase_ :List[Any] = 0 with open(__A , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) lowerCAmelCase_ :Tuple = token_index writer.write(""" """.join(__A ) + """\n""" ) index += 1 return vocab_file, merge_file def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase_ :Optional[Any] = [self.cls_token_id] lowerCAmelCase_ :Optional[int] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1] def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]: lowerCAmelCase_ :List[str] = [self.sep_token_id] lowerCAmelCase_ :List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self , __A , __A=False , **__A ) -> List[Any]: lowerCAmelCase_ :str = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()): lowerCAmelCase_ :int = """ """ + text return (text, kwargs)
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *__A , **__A ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , __A , ) super().__init__(*__A , **__A )
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame: '''simple docstring''' lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}""" lowerCAmelCase_ :List[str] = { """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", """Accept-Language""": """en-US, en;q=0.5""", } lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text ) # Initialize a Pandas dataframe with the column titles lowerCAmelCase_ :Union[str, Any] = DataFrame( columns=[ """Product Title""", """Product Link""", """Current Price of the product""", """Product Rating""", """MRP of the product""", """Discount""", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( """div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ): try: lowerCAmelCase_ :str = item.ha.text lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.ha.a["""href"""] lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text try: lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text except AttributeError: lowerCAmelCase_ :int = """Not available""" try: lowerCAmelCase_ :str = ( """โ‚น""" + item.find( """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""โ‚น""" )[1] ) except AttributeError: lowerCAmelCase_ :Optional[Any] = """""" try: lowerCAmelCase_ :str = float( ( ( float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) - float(product_price.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) / float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) * 1_0_0 ) except ValueError: lowerCAmelCase_ :Union[str, Any] = float("""nan""" ) except AttributeError: pass lowerCAmelCase_ :Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] lowerCAmelCase_ :List[Any] = """ """ lowerCAmelCase_ :Tuple = """ """ data_frame.index += 1 return data_frame if __name__ == "__main__": __UpperCAmelCase = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available __UpperCAmelCase = { 'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ErnieForCausalLM', 'ErnieForMaskedLM', 'ErnieForMultipleChoice', 'ErnieForNextSentencePrediction', 'ErnieForPreTraining', 'ErnieForQuestionAnswering', 'ErnieForSequenceClassification', 'ErnieForTokenClassification', 'ErnieModel', 'ErniePreTrainedModel', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Any = """laion/clap-htsat-unfused""" lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp() def __lowerCAmelCase ( self , **__A ) -> List[Any]: return RobertaTokenizer.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self , **__A ) -> Tuple: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self ) -> int: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Optional[Any] = self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 ) lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Dict = self.get_feature_extractor() lowerCAmelCase_ :str = self.get_tokenizer() lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) ) lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" ) lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :List[Any] = """This is a test string""" lowerCAmelCase_ :Dict = processor(text=__A ) lowerCAmelCase_ :List[str] = tokenizer(__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :int = self.get_feature_extractor() lowerCAmelCase_ :Tuple = 
self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase_ :Tuple = processor.batch_decode(__A ) lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
"""simple docstring""" __UpperCAmelCase = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} __UpperCAmelCase = ['a', 'b', 'c', 'd', 'e'] def _snake_case ( lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : int ) -> str: '''simple docstring''' lowerCAmelCase_ :str = start # add current to visited visited.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: lowerCAmelCase_ :Any = topological_sort(lowercase__ , lowercase__ , lowercase__ ) # if all neighbors visited add current to sort sort.append(lowercase__ ) # if all vertices haven't been visited select a new one to visit if len(lowercase__ ) != len(lowercase__ ): for vertice in vertices: if vertice not in visited: lowerCAmelCase_ :Optional[Any] = topological_sort(lowercase__ , lowercase__ , lowercase__ ) # return sort return sort if __name__ == "__main__": __UpperCAmelCase = topological_sort('a', [], []) print(sort)
"""simple docstring""" import os from math import logaa def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int: '''simple docstring''' lowerCAmelCase_ :float = 0 lowerCAmelCase_ :Union[str, Any] = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ): lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) ) if x * logaa(lowercase__ ) > largest: lowerCAmelCase_ :Any = x * logaa(lowercase__ ) lowerCAmelCase_ :List[Any] = i + 1 return result if __name__ == "__main__": print(solution())
"""simple docstring""" import itertools import math def _snake_case ( lowercase__ : int ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = 2 while True: if is_prime(lowercase__ ): yield num num += 1 def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> int: '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , lowercase__ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __UpperCAmelCase = logging.get_logger(__name__) @dataclass class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Optional[Any] = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self , **__A ) -> Union[str, Any]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowerCAmelCase_ :int = deprecated_arg[3:] setattr(self , __A , not kwargs.pop(__A ) ) logger.warning( f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or""" f""" {positive_arg}={kwargs[positive_arg]}""" ) lowerCAmelCase_ :Optional[int] = kwargs.pop("""torchscript""" , self.torchscript ) lowerCAmelCase_ :List[str] = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) lowerCAmelCase_ :Tuple = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**__A ) UpperCAmelCase_ :bool = field(default=A__ , metadata={"help": "Trace the models using torchscript"} ) UpperCAmelCase_ :bool = field(default=A__ , metadata={"help": "Print Xla/PyTorch tpu metrics"} ) UpperCAmelCase_ :str = field( default="O1" , metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ) } , ) @cached_property def __lowerCAmelCase ( self ) -> Tuple["torch.device", int]: requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: lowerCAmelCase_ :str = torch.device("""cpu""" ) lowerCAmelCase_ :Tuple = 0 elif is_torch_tpu_available(): lowerCAmelCase_ :int = xm.xla_device() lowerCAmelCase_ :Optional[Any] = 0 else: lowerCAmelCase_ :Tuple = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowerCAmelCase_ :List[str] = torch.cuda.device_count() return device, n_gpu @property def __lowerCAmelCase ( self ) -> int: return is_torch_tpu_available() and self.tpu @property def __lowerCAmelCase ( self ) -> int: requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def __lowerCAmelCase ( self ) -> "torch.device": requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def __lowerCAmelCase ( self ) -> Union[str, Any]: requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def __lowerCAmelCase ( self ) -> Union[str, Any]: return self.n_gpu > 0
"""simple docstring""" def _snake_case ( lowercase__ : int = 5_0 ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"""{solution() = }""")
1
1
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame: '''simple docstring''' lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}""" lowerCAmelCase_ :List[str] = { """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", """Accept-Language""": """en-US, en;q=0.5""", } lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text ) # Initialize a Pandas dataframe with the column titles lowerCAmelCase_ :Union[str, Any] = DataFrame( columns=[ """Product Title""", """Product Link""", """Current Price of the product""", """Product Rating""", """MRP of the product""", """Discount""", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( """div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ): try: lowerCAmelCase_ :str = item.ha.text lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.ha.a["""href"""] lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text try: lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text except AttributeError: lowerCAmelCase_ :int = """Not available""" try: lowerCAmelCase_ :str = ( """โ‚น""" + item.find( """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""โ‚น""" )[1] ) except AttributeError: lowerCAmelCase_ :Optional[Any] = """""" try: lowerCAmelCase_ :str = float( ( ( float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) - float(product_price.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) / float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) * 1_0_0 ) except ValueError: lowerCAmelCase_ :Union[str, Any] = float("""nan""" ) except AttributeError: pass lowerCAmelCase_ :Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] lowerCAmelCase_ :List[Any] = """ """ lowerCAmelCase_ :Tuple = """ """ data_frame.index += 1 return data_frame if __name__ == "__main__": __UpperCAmelCase = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCAmelCase_ :List[Any] = CLIPTextModel(__A ) lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Union[str, Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Tuple = torch.manual_seed(__A ) else: lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A ) 
lowerCAmelCase_ :List[Any] = 2 lowerCAmelCase_ :int = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ) lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> int: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__A ): if isinstance(__A , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
lowerCAmelCase_ :str = CLIPTextModel(__A ) lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase_ :List[Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> str: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A ) else: lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A ) lowerCAmelCase_ :Optional[Any] = 2 lowerCAmelCase_ :Optional[int] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), ] lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[str] = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) lowerCAmelCase_ :Union[str, Any] = 1_0.0 lowerCAmelCase_ :Union[str, Any] = 4 lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A ) lowerCAmelCase_ :List[str] = steps lowerCAmelCase_ :int = scale lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0] lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = steps lowerCAmelCase_ :str = scale lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = steps lowerCAmelCase_ :Union[str, Any] = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = steps lowerCAmelCase_ :Tuple = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def __lowerCAmelCase ( self ) -> Dict: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> Optional[int]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :str = self.get_dummy_components() 
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__A ) except NotImplementedError: pass @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase_ :List[Any] = """evil space-punk bird""" lowerCAmelCase_ :List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) lowerCAmelCase_ :int = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) lowerCAmelCase_ :Union[str, Any] = pipe( __A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) lowerCAmelCase_ :Tuple = output.images[0] assert image.shape == (512, 512, 3) lowerCAmelCase_ :Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
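# A hedged sketch of producing the canny control image consumed by the slow
# test above, mirroring the usual diffusers ControlNet recipe; it assumes
# opencv-python is installed (cv2 is not a hard diffusers dependency), and
# make_canny_control_image is a hypothetical helper.
import cv2
import numpy as np
from PIL import Image


def make_canny_control_image(image: Image.Image, low: int = 100, high: int = 200) -> Image.Image:
    """Edge-detect an RGB image into the 3-channel map a canny ControlNet expects."""
    edges = cv2.Canny(np.array(image), low, high)  # single-channel edge map
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)  # replicate to 3 channels
    return Image.fromarray(edges)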
"""simple docstring""" import qiskit def _snake_case ( lowercase__ : int , lowercase__ : int ) -> qiskit.result.counts.Counts: '''simple docstring''' lowerCAmelCase_ :str = qiskit.Aer.get_backend("""aer_simulator""" ) lowerCAmelCase_ :Any = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator lowerCAmelCase_ :List[Any] = qiskit.execute(lowercase__ , lowercase__ , shots=1_0_0_0 ) # Return the histogram data of the results of the experiment return job.result().get_counts(lowercase__ ) if __name__ == "__main__": __UpperCAmelCase = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ): UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :List[str] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" f""" `n_embd`: {n_embd} are not equal.""" ) lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim lowerCAmelCase_ :str = prefix_hidden_dim lowerCAmelCase_ :str = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :List[Any] = ( nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :Any = GPTaConfig( vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , ) lowerCAmelCase_ :Any = GPTaLMHeadModel(__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]: lowerCAmelCase_ :str = self.transformer.transformer.wte(__A ) lowerCAmelCase_ :Any = self.encode_prefix(__A ) lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A ) lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor: return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: return self.encode_prefix(__A ) @torch.no_grad() def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]: lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 ) lowerCAmelCase_ :Optional[int] = [] lowerCAmelCase_ :List[str] = [] for feature in features: lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature # Only support beam search for now lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam( input_embeds=__A , device=__A , eos_token_id=__A ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) lowerCAmelCase_ :Tuple = torch.stack(__A ) lowerCAmelCase_ :int = torch.stack(__A ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, 
Any]: lowerCAmelCase_ :Optional[int] = eos_token_id lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int ) lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool ) if input_embeds is not None: lowerCAmelCase_ :List[str] = input_embeds else: lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A ) for i in range(__A ): lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A ) lowerCAmelCase_ :str = outputs.logits lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) lowerCAmelCase_ :Dict = logits.softmax(-1 ).log() if scores is None: lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 ) lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: lowerCAmelCase_ :List[str] = next_tokens else: lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] ) lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 ) else: lowerCAmelCase_ :List[Any] = -float(np.inf ) lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Optional[int] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None] lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 ) lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1] lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source] lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1] lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 ) lowerCAmelCase_ :str = tokens[next_tokens_source] lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 ) lowerCAmelCase_ :Dict = generated[next_tokens_source] lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source] lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 ) lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze() if is_stopped.all(): break lowerCAmelCase_ :str = scores / seq_lengths lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A ) # tokens tensors are already padded to max_seq_length lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order] lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 ) lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __UpperCAmelCase = None __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} __UpperCAmelCase = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } __UpperCAmelCase = { 'facebook/nllb-large-en-ro': 10_24, 'facebook/nllb-200-distilled-600M': 10_24, } # fmt: off __UpperCAmelCase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Any = VOCAB_FILES_NAMES UpperCAmelCase_ :str = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ :str = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ :str = ["input_ids", "attention_mask"] UpperCAmelCase_ :List[str] = NllbTokenizer UpperCAmelCase_ :List[int] = [] UpperCAmelCase_ :List[int] = [] def __init__( self , __A=None , __A=None , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=None , __A=None , __A=None , __A=False , **__A , ) -> Optional[Any]: # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ :str = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token lowerCAmelCase_ :List[Any] = legacy_behaviour super().__init__( vocab_file=__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , legacy_behaviour=__A , **__A , ) lowerCAmelCase_ :Any = vocab_file lowerCAmelCase_ :Dict = False if not self.vocab_file else True lowerCAmelCase_ :List[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) lowerCAmelCase_ :List[str] = { lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowerCAmelCase_ :int = src_lang if src_lang is not None else """eng_Latn""" lowerCAmelCase_ :Any = self.convert_tokens_to_ids(self._src_lang ) lowerCAmelCase_ :str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __lowerCAmelCase ( self ) -> str: return self._src_lang @src_lang.setter def __lowerCAmelCase ( self , __A ) -> None: lowerCAmelCase_ :Union[str, Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]: lowerCAmelCase_ :Tuple = [self.sep_token_id] lowerCAmelCase_ :Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self , __A , __A , __A , __A , **__A ) -> str: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) lowerCAmelCase_ :Tuple = src_lang lowerCAmelCase_ :Union[str, Any] = self(__A , add_special_tokens=__A , return_tensors=__A , **__A ) lowerCAmelCase_ :Dict = self.convert_tokens_to_ids(__A ) lowerCAmelCase_ :Optional[int] = tgt_lang_id return inputs def __lowerCAmelCase ( self , __A , __A = "eng_Latn" , __A = None , __A = "fra_Latn" , **__A , ) -> BatchEncoding: lowerCAmelCase_ :Optional[int] = src_lang lowerCAmelCase_ :str = tgt_lang return super().prepare_seqaseq_batch(__A , __A , **__A ) def __lowerCAmelCase ( self ) -> Tuple: return self.set_src_lang_special_tokens(self.src_lang ) def __lowerCAmelCase ( self ) -> Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __lowerCAmelCase ( self , __A ) -> None: lowerCAmelCase_ :Optional[int] = 
self.convert_tokens_to_ids(__A ) if self.legacy_behaviour: lowerCAmelCase_ :Any = [] lowerCAmelCase_ :Any = [self.eos_token_id, self.cur_lang_code] else: lowerCAmelCase_ :int = [self.cur_lang_code] lowerCAmelCase_ :Any = [self.eos_token_id] lowerCAmelCase_ :int = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCAmelCase_ :Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCAmelCase_ :Dict = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __lowerCAmelCase ( self , __A ) -> None: lowerCAmelCase_ :Optional[int] = self.convert_tokens_to_ids(__A ) if self.legacy_behaviour: lowerCAmelCase_ :Optional[Any] = [] lowerCAmelCase_ :Tuple = [self.eos_token_id, self.cur_lang_code] else: lowerCAmelCase_ :Union[str, Any] = [self.cur_lang_code] lowerCAmelCase_ :Tuple = [self.eos_token_id] lowerCAmelCase_ :Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCAmelCase_ :List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCAmelCase_ :Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return lowerCAmelCase_ :Tuple = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file , __A ) return (out_vocab_file,)
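# A minimal translation sketch for the fast NLLB tokenizer above; assumes hub
# access to the facebook/nllb-200-distilled-600M checkpoint referenced in the
# file's pretrained maps, with language codes taken from FAIRSEQ_LANGUAGE_CODES.
from transformers import AutoModelForSeq2SeqLM, NllbTokenizerFast

tokenizer = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("The weather is nice today.", return_tensors="pt")
generated = model.generate(
    **inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn")
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))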
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "detr" UpperCAmelCase_ :str = ["past_key_values"] UpperCAmelCase_ :Tuple = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(__A , __A ): lowerCAmelCase_ :str = backbone_config.get("""model_type""" ) lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A ) # set timm attributes to None lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None lowerCAmelCase_ :Tuple = use_timm_backbone lowerCAmelCase_ :Optional[int] = backbone_config lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :int = num_queries lowerCAmelCase_ :List[Any] = d_model lowerCAmelCase_ :Optional[int] = encoder_ffn_dim lowerCAmelCase_ :Tuple = encoder_layers lowerCAmelCase_ :int = encoder_attention_heads lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim lowerCAmelCase_ :List[str] = decoder_layers lowerCAmelCase_ :Dict = decoder_attention_heads lowerCAmelCase_ :Dict = dropout lowerCAmelCase_ :Tuple = attention_dropout lowerCAmelCase_ :Union[str, Any] = activation_dropout lowerCAmelCase_ :Any = activation_function lowerCAmelCase_ :List[str] = init_std lowerCAmelCase_ :Optional[int] = init_xavier_std lowerCAmelCase_ :int = encoder_layerdrop lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop lowerCAmelCase_ :List[str] = encoder_layers lowerCAmelCase_ :Union[str, Any] = auxiliary_loss lowerCAmelCase_ :str = position_embedding_type lowerCAmelCase_ :List[Any] = backbone lowerCAmelCase_ :str = use_pretrained_backbone lowerCAmelCase_ :str = dilation # Hungarian matcher lowerCAmelCase_ :List[Any] = class_cost lowerCAmelCase_ :Union[str, Any] = bbox_cost lowerCAmelCase_ :Tuple = giou_cost # Loss coefficients lowerCAmelCase_ :Optional[int] = mask_loss_coefficient lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient lowerCAmelCase_ :Tuple = bbox_loss_coefficient lowerCAmelCase_ :Tuple = giou_loss_coefficient lowerCAmelCase_ :Dict = eos_coefficient super().__init__(is_encoder_decoder=__A , **__A ) @property def __lowerCAmelCase ( self ) -> int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) -> int: return self.d_model @classmethod 
def __lowerCAmelCase ( cls , __A , **__A ) -> Any: return cls(backbone_config=__A , **__A ) def __lowerCAmelCase ( self ) -> Dict[str, any]: lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowerCAmelCase_ :Dict = self.backbone_config.to_dict() lowerCAmelCase_ :str = self.__class__.model_type return output class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[Any] = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-5 @property def __lowerCAmelCase ( self ) -> int: return 12
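# A short sketch of the aliasing behaviour defined by attribute_map above,
# using the public DetrConfig that this (obfuscated) file corresponds to.
from transformers import DetrConfig

config = DetrConfig(num_queries=50, d_model=256)
assert config.hidden_size == 256  # alias for d_model
assert config.num_attention_heads == config.encoder_attention_heads
print(config.backbone)  # "resnet50" by default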
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['DeiTFeatureExtractor'] __UpperCAmelCase = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def _snake_case ( lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : List[str]=None , lowercase__ : str=None , lowercase__ : List[Any]=None , lowercase__ : Dict=None , lowercase__ : List[Any]=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: lowerCAmelCase_ :str = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: lowerCAmelCase_ :List[str] = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: lowerCAmelCase_ :Dict = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowercase__ ) if decoder_head_mask is None: lowerCAmelCase_ :List[str] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase__ ) if cross_attn_head_mask is None: lowerCAmelCase_ :Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowercase__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=16 , __A=2 , __A=4 , __A=4 , __A="relu" , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.0 , __A=20 , __A=2 , __A=1 , __A=0 , ) -> int: lowerCAmelCase_ :Union[str, Any] = parent lowerCAmelCase_ :Tuple = batch_size lowerCAmelCase_ :Optional[Any] = seq_length lowerCAmelCase_ :str = is_training lowerCAmelCase_ :str = use_labels lowerCAmelCase_ :List[str] = vocab_size lowerCAmelCase_ :Dict = hidden_size lowerCAmelCase_ :Optional[Any] = num_hidden_layers lowerCAmelCase_ :Any = num_attention_heads lowerCAmelCase_ :Union[str, Any] = intermediate_size lowerCAmelCase_ :Optional[int] = hidden_act lowerCAmelCase_ :int = hidden_dropout_prob lowerCAmelCase_ :Tuple = attention_probs_dropout_prob lowerCAmelCase_ :Optional[Any] = encoder_layerdrop lowerCAmelCase_ :Optional[int] = decoder_layerdrop lowerCAmelCase_ :Dict = max_position_embeddings lowerCAmelCase_ :Optional[int] = eos_token_id lowerCAmelCase_ :Optional[int] = pad_token_id lowerCAmelCase_ :List[str] = bos_token_id def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ :Tuple = self.eos_token_id # Eos Token lowerCAmelCase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using 
past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input lowerCAmelCase_ :List[str] = input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase_ :int = decoder_input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase_ :List[Any] = self.get_config() lowerCAmelCase_ :int = prepare_mam_aaa_inputs_dict(__A , __A , __A ) return config, inputs_dict def __lowerCAmelCase ( self ) -> int: return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def __lowerCAmelCase ( self , __A , __A ) -> Any: lowerCAmelCase_ :str = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval() lowerCAmelCase_ :Union[str, Any] = inputs_dict["""input_ids"""] lowerCAmelCase_ :Optional[Any] = inputs_dict["""attention_mask"""] lowerCAmelCase_ :Any = inputs_dict["""head_mask"""] # first forward pass lowerCAmelCase_ :int = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A ) lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase_ :str = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase_ :List[Any] = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and lowerCAmelCase_ :List[str] = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase_ :str = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) lowerCAmelCase_ :Dict = model(__A , attention_mask=__A )["""last_hidden_state"""] lowerCAmelCase_ :Dict = model(__A , attention_mask=__A , past_key_values=__A )[ """last_hidden_state""" ] # select random slice lowerCAmelCase_ :Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase_ :int = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase_ :Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__A , __A , atol=1E-2 ) ) def __lowerCAmelCase ( self , __A , __A ) -> int: lowerCAmelCase_ :List[str] = MaMaaaModel(config=__A ).to(__A ).eval() lowerCAmelCase_ :Dict = model(**__A ) lowerCAmelCase_ :Tuple = outputs.encoder_last_hidden_state lowerCAmelCase_ :Union[str, Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase_ :Optional[int] = model.get_encoder() encoder.save_pretrained(__A ) lowerCAmelCase_ :Union[str, Any] = MaMaaaEncoder.from_pretrained(__A ).to(__A ) lowerCAmelCase_ :List[Any] = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - 
encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase_ :Union[str, Any] = model.get_decoder() decoder.save_pretrained(__A ) lowerCAmelCase_ :Any = MaMaaaDecoder.from_pretrained(__A ).to(__A ) lowerCAmelCase_ :Tuple = decoder( input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :Union[str, Any] = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) UpperCAmelCase_ :str = (MaMaaaForConditionalGeneration,) if is_torch_available() else () UpperCAmelCase_ :Dict = ( { "conversational": MaMaaaForConditionalGeneration, "feature-extraction": MaMaaaModel, "summarization": MaMaaaForConditionalGeneration, "text2text-generation": MaMaaaForConditionalGeneration, "translation": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) UpperCAmelCase_ :Optional[Any] = True UpperCAmelCase_ :Optional[int] = True UpperCAmelCase_ :List[str] = False UpperCAmelCase_ :Optional[Any] = False def __lowerCAmelCase ( self , __A , __A , __A , __A , __A ) -> Optional[int]: if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :str = MaMaaaModelTester(self ) lowerCAmelCase_ :Optional[Any] = ConfigTester(self , config_class=__A ) def __lowerCAmelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ , lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: lowerCAmelCase_ :Union[str, Any] = model_class(__A ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = model_class.from_pretrained(__A , output_loading_info=__A ) self.assertEqual(info["""missing_keys"""] , [] ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__A ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): lowerCAmelCase_ :Any = model_class(__A ) model.to(__A ) model.eval() lowerCAmelCase_ :str = copy.deepcopy(self._prepare_for_class(__A , __A ) ) if not self.is_encoder_decoder: lowerCAmelCase_ :List[str] = inputs["""input_ids"""] del inputs["input_ids"] else: lowerCAmelCase_ :Tuple = inputs["""input_ids"""] lowerCAmelCase_ :Tuple = inputs.get("""decoder_input_ids""" , __A ) del inputs["input_ids"] inputs.pop("""decoder_input_ids""" , __A ) lowerCAmelCase_ :Any = model.get_input_embeddings() if not self.is_encoder_decoder: lowerCAmelCase_ :Optional[int] = wte(__A ) else: 
lowerCAmelCase_ :Tuple = wte(__A ) lowerCAmelCase_ :List[str] = wte(__A ) with torch.no_grad(): model(**__A )[0] def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase_ :str = input_dict["""input_ids"""] lowerCAmelCase_ :Optional[int] = input_ids.ne(1 ).to(__A ) lowerCAmelCase_ :int = MaMaaaForConditionalGeneration(__A ).eval().to(__A ) if torch_device == "cuda": model.half() model.generate(__A , attention_mask=__A ) model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 ) def _snake_case ( lowercase__ : int ) -> List[str]: '''simple docstring''' return torch.tensor(lowercase__ , dtype=torch.long , device=lowercase__ ) __UpperCAmelCase = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> Dict: return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(__A ) lowerCAmelCase_ :Dict = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] ) lowerCAmelCase_ :Optional[int] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] ) lowerCAmelCase_ :List[str] = prepare_mam_aaa_inputs_dict(model.config , __A , __A ) with torch.no_grad(): lowerCAmelCase_ :Union[str, Any] = model(**__A )[0] lowerCAmelCase_ :Union[str, Any] = torch.Size((1, 11, 1024) ) self.assertEqual(output.shape , __A ) # change to expected output here lowerCAmelCase_ :List[Any] = torch.tensor( [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Dict = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__A ) # change to intended input lowerCAmelCase_ :Tuple = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] ) lowerCAmelCase_ :Optional[Any] = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] ) lowerCAmelCase_ :str = prepare_mam_aaa_inputs_dict(model.config , __A , __A ) with torch.no_grad(): lowerCAmelCase_ :str = model(**__A )[0] lowerCAmelCase_ :str = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , __A ) # change to expected output here lowerCAmelCase_ :Union[str, Any] = torch.tensor( [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :List[str] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__A ) lowerCAmelCase_ :Tuple = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" ) lowerCAmelCase_ :Optional[Any] = [ """L'affaire NSA souligne l'absence totale de dรฉbat sur le renseignement""", """Selon moi, il y a deux niveaux de rรฉponse de la part du gouvernement franรงais.""", """Lorsque Franรงois Hollande tรฉlรฉphone ร  Barack Obama ou quand le ministre des affaires รฉtrangรจres Laurent""" """ Fabius convoque l'ambassadeur des Etats-Unis, ils rรฉagissent ร  une vraie dรฉcouverte, qui est celle de""" """ 
l'ampleur de la surveillance amรฉricaine sur l'ensemble des communications en France.""", ] # The below article tests that we don't add any hypotheses outside of the top n_beams lowerCAmelCase_ :List[str] = tokenizer(__A , padding=__A , return_tensors="""pt""" ) lowerCAmelCase_ :Dict = model.generate( input_ids=dct["""input_ids"""].to(__A ) , attention_mask=dct["""attention_mask"""].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , ) lowerCAmelCase_ :List[str] = [ """The NSA case highlights the total absence of intelligence debate""", """I think there are two levels of response from the French government.""", """When Franรงois Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.""" """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all""" """ communications in France.""", ] lowerCAmelCase_ :Dict = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A ) assert generated == expected_en
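# A compact sketch of the fr-to-en generation path exercised by the slow tests
# above; assumes hub access to the facebook/m2m100_418M checkpoint.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

batch = tokenizer("La vie est belle.", return_tensors="pt")
generated = model.generate(
    **batch, forced_bos_token_id=tokenizer.get_lang_id("en"), num_beams=5
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))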
"""simple docstring""" __UpperCAmelCase = 2_56 # Modulus to hash a string __UpperCAmelCase = 1_00_00_03 def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool: '''simple docstring''' lowerCAmelCase_ :Tuple = len(lowercase__ ) lowerCAmelCase_ :List[str] = len(lowercase__ ) if p_len > t_len: return False lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :Optional[int] = 0 lowerCAmelCase_ :Any = 1 # Calculating the hash of pattern and substring of text for i in range(lowercase__ ): lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash lowerCAmelCase_ :Any = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def _snake_case ( ) -> None: '''simple docstring''' lowerCAmelCase_ :int = """abc1abc12""" lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc""" lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc""" assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ ) # Test 2) lowerCAmelCase_ :Dict = """ABABX""" lowerCAmelCase_ :int = """ABABZABABYABABX""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 3) lowerCAmelCase_ :Union[str, Any] = """AAAB""" lowerCAmelCase_ :List[str] = """ABAAAAAB""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 4) lowerCAmelCase_ :Dict = """abcdabcy""" lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 5) lowerCAmelCase_ :Optional[int] = """Lรผ""" lowerCAmelCase_ :Optional[int] = """Lรผsai""" assert rabin_karp(lowercase__ , lowercase__ ) lowerCAmelCase_ :Optional[int] = """Lue""" assert not rabin_karp(lowercase__ , lowercase__ ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCAmelCase ( self ) -> str: torch.manual_seed(0 ) lowerCAmelCase_ :int = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :int = self.dummy_uncond_unet lowerCAmelCase_ :Tuple = ScoreSdeVeScheduler() lowerCAmelCase_ :Optional[int] = ScoreSdeVePipeline(unet=__A , scheduler=__A ) sde_ve.to(__A ) sde_ve.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Tuple = torch.manual_seed(0 ) lowerCAmelCase_ :Dict = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__A ).images lowerCAmelCase_ :Tuple = torch.manual_seed(0 ) lowerCAmelCase_ :Union[str, Any] = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__A , return_dict=__A )[ 0 ] lowerCAmelCase_ :int = image[0, -3:, -3:, -1] lowerCAmelCase_ :Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase_ :Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Union[str, Any] = """google/ncsnpp-church-256""" lowerCAmelCase_ :Optional[int] = UNetaDModel.from_pretrained(__A ) lowerCAmelCase_ :str = ScoreSdeVeScheduler.from_pretrained(__A ) lowerCAmelCase_ :Union[str, Any] = ScoreSdeVePipeline(unet=__A , scheduler=__A ) sde_ve.to(__A ) sde_ve.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[str] = torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__A ).images lowerCAmelCase_ :List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCAmelCase_ :List[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str: '''simple docstring''' lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : int ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase_ :Optional[Any] = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase_ :List[Any] = 1_6 elif accelerator.mixed_precision != "no": lowerCAmelCase_ :List[str] = 8 else: lowerCAmelCase_ :Optional[int] = None return tokenizer.pad( lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCAmelCase_ :Optional[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :List[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __UpperCAmelCase = mocked_dataloaders # noqa: F811 def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1": lowerCAmelCase_ :Optional[Any] = 2 # New Code # lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps ) lowerCAmelCase_ :int = int(args.local_sgd_steps ) # Initialize accelerator lowerCAmelCase_ :str = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :int = config["""lr"""] lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :int = int(config["""seed"""] ) lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ ) # Instantiate scheduler lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Now we train the model for epoch in range(lowercase__ ): model.train() with LocalSGD( accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(lowercase__ ): lowerCAmelCase_ :str = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = output.loss accelerator.backward(lowercase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) def _snake_case ( ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument( """--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowerCAmelCase_ :Optional[Any] = parser.parse_args() lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
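# Condensed, self-contained sketch of the LocalSGD pattern used in the script
# above: parameters synchronize across workers every `local_sgd_steps` optimizer
# steps instead of after every batch. Model and data below are toy stand-ins.
import torch
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
data = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(16)]
model, optimizer = accelerator.prepare(model, optimizer)
with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for x, y in data:
        loss = torch.nn.functional.cross_entropy(model(x.to(accelerator.device)), y.to(accelerator.device))
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        local_sgd.step()  # no-op until the 8th step, then averages parameters across workers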
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :str = XGLMConfig UpperCAmelCase_ :Optional[Any] = {} UpperCAmelCase_ :Any = "gelu" def __init__( self , __A , __A=14 , __A=7 , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , ) -> Union[str, Any]: lowerCAmelCase_ :Any = parent lowerCAmelCase_ :List[Any] = batch_size lowerCAmelCase_ :List[Any] = seq_length lowerCAmelCase_ :str = is_training lowerCAmelCase_ :str = use_input_mask lowerCAmelCase_ :Tuple = use_labels lowerCAmelCase_ :Optional[int] = vocab_size lowerCAmelCase_ :Dict = d_model lowerCAmelCase_ :str = num_hidden_layers lowerCAmelCase_ :Dict = num_attention_heads lowerCAmelCase_ :Optional[int] = ffn_dim lowerCAmelCase_ :Tuple = activation_function lowerCAmelCase_ :List[Any] = activation_dropout lowerCAmelCase_ :str = attention_dropout lowerCAmelCase_ :Union[str, Any] = max_position_embeddings lowerCAmelCase_ :Optional[int] = initializer_range lowerCAmelCase_ :Any = None lowerCAmelCase_ :Any = 0 lowerCAmelCase_ :Dict = 2 lowerCAmelCase_ :Union[str, Any] = 1 def __lowerCAmelCase ( self ) -> Any: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) lowerCAmelCase_ :List[str] = None if self.use_input_mask: lowerCAmelCase_ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ :int = self.get_config() lowerCAmelCase_ :List[str] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def __lowerCAmelCase ( self ) -> Union[str, Any]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__a , ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :List[str] = self.prepare_config_and_inputs() ( lowerCAmelCase_ ) :List[Any] = config_and_inputs lowerCAmelCase_ :Union[str, Any] = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): UpperCAmelCase_ :str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () UpperCAmelCase_ :Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else () UpperCAmelCase_ :List[str] = ( 
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) UpperCAmelCase_ :Optional[int] = False UpperCAmelCase_ :Union[str, Any] = False UpperCAmelCase_ :Union[str, Any] = False def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[str] = TFXGLMModelTester(self ) lowerCAmelCase_ :str = ConfigTester(self , config_class=__a , n_embd=37 ) def __lowerCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() @slow def __lowerCAmelCase ( self ) -> int: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ :str = TFXGLMModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def __lowerCAmelCase ( self ) -> str: super().test_resize_token_embeddings() @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __lowerCAmelCase ( self , __A=True ) -> Any: lowerCAmelCase_ :Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) lowerCAmelCase_ :Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off lowerCAmelCase_ :str = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on lowerCAmelCase_ :int = model.generate(__a , do_sample=__a , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __a ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Union[str, Any] = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) lowerCAmelCase_ :int = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) lowerCAmelCase_ :int = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) lowerCAmelCase_ :Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): lowerCAmelCase_ :Dict = model.generate(__a , do_sample=__a , seed=[7, 0] ) lowerCAmelCase_ :Any = tokenizer.decode(output_ids[0] , skip_special_tokens=__a ) lowerCAmelCase_ :str = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(__a , __a ) @slow def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[int] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) lowerCAmelCase_ :Dict = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) lowerCAmelCase_ :int = 'left' # use different length sentences to test batching lowerCAmelCase_ :Optional[Any] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. 
When', 'Hello, my dog is a little', ] lowerCAmelCase_ :int = tokenizer(__a , return_tensors="""tf""" , padding=__a ) lowerCAmelCase_ :List[Any] = inputs['input_ids'] lowerCAmelCase_ :Tuple = model.generate(input_ids=__a , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) lowerCAmelCase_ :List[str] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids lowerCAmelCase_ :List[Any] = model.generate(input_ids=__a , max_new_tokens=12 ) lowerCAmelCase_ :List[str] = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids lowerCAmelCase_ :List[str] = model.generate(input_ids=__a , max_new_tokens=12 ) lowerCAmelCase_ :Optional[int] = tokenizer.batch_decode(__a , skip_special_tokens=__a ) lowerCAmelCase_ :Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a ) lowerCAmelCase_ :List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a ) lowerCAmelCase_ :int = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(__a , __a ) self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
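# Editor's note on the batching test above: decoder-only generation appends new
# tokens after the last position of each row, so pads must sit on the left or
# the model would be asked to continue from pad tokens. Standalone illustration
# with the same checkpoint the test uses:
from transformers import TFXGLMForCausalLM, XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"
model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
inputs = tokenizer(["A much longer prompt than the next one", "Hi"], padding=True, return_tensors="tf")
out = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=12)
print(tokenizer.batch_decode(out, skip_special_tokens=True))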
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : List[str] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase_ :str = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" ) return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. lowerCAmelCase_ :Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :Any = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' model.eval() lowerCAmelCase_ :Dict = 0 for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowercase__ ) - 1: lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Tuple = metric.compute() return eval_metric["accuracy"] def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any: '''simple docstring''' lowerCAmelCase_ :Optional[int] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :int = config["""lr"""] lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :Optional[int] = int(config["""seed"""] ) lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase_ :Optional[Any] = args.model_name_or_path set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ ) # Instantiate optimizer lowerCAmelCase_ :List[str] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowerCAmelCase_ :Any = 1 lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , ) else: lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase_ :List[str] = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase_ :List[Any] = 0 lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" ) lowerCAmelCase_ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: lowerCAmelCase_ :Dict = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1] lowerCAmelCase_ :int = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1 lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) accelerator.print("""resumed checkpoint performance:""" , lowercase__ ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f: lowerCAmelCase_ :List[str] = json.load(lowercase__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model lowerCAmelCase_ :List[Any] = {} for epoch in range(lowercase__ , lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Dict = outputs.loss lowerCAmelCase_ :int = loss / gradient_accumulation_steps accelerator.backward(lowercase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 lowerCAmelCase_ :List[str] = f"""epoch_{epoch}""" lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ ) accelerator.save_state(lowercase__ ) lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = accuracy lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0] lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""] lowerCAmelCase_ :List[Any] = epoch lowerCAmelCase_ :Tuple = overall_step accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) def _snake_case ( ) -> int: '''simple docstring''' lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , ) parser.add_argument( 
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , ) lowerCAmelCase_ :Optional[int] = parser.parse_args() lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class _SCREAMING_SNAKE_CASE ( _UpperCAmelCase , unittest.TestCase ): UpperCAmelCase_ :Dict = ShapEImgaImgPipeline UpperCAmelCase_ :Union[str, Any] = ["image"] UpperCAmelCase_ :Dict = ["image"] UpperCAmelCase_ :Any = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] UpperCAmelCase_ :Dict = False @property def __lowerCAmelCase ( self ) -> Optional[int]: return 32 @property def __lowerCAmelCase ( self ) -> Tuple: return 32 @property def __lowerCAmelCase ( self ) -> Dict: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) -> int: return 8 @property def __lowerCAmelCase ( self ) -> int: torch.manual_seed(0 ) lowerCAmelCase_ :Union[str, Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowerCAmelCase_ :Dict = CLIPVisionModel(_UpperCAmelCase ) return model @property def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :List[str] = CLIPImageProcessor( crop_size=224 , do_center_crop=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_resize=_UpperCAmelCase , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor @property def __lowerCAmelCase ( self ) -> int: torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowerCAmelCase_ :str = PriorTransformer(**_UpperCAmelCase ) return model @property def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowerCAmelCase_ :str = ShapERenderer(**_UpperCAmelCase ) return model def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :int = self.dummy_prior lowerCAmelCase_ :Optional[int] = self.dummy_image_encoder lowerCAmelCase_ :List[Any] = self.dummy_image_processor lowerCAmelCase_ :Union[str, Any] = self.dummy_renderer lowerCAmelCase_ :Tuple = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , 
clip_sample_range=1.0 , ) lowerCAmelCase_ :Dict = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]: lowerCAmelCase_ :Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) if str(_UpperCAmelCase ).startswith("""mps""" ): lowerCAmelCase_ :Union[str, Any] = torch.manual_seed(_UpperCAmelCase ) else: lowerCAmelCase_ :str = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) lowerCAmelCase_ :Optional[int] = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :str = '''cpu''' lowerCAmelCase_ :Optional[Any] = self.get_dummy_components() lowerCAmelCase_ :Dict = self.pipeline_class(**_UpperCAmelCase ) lowerCAmelCase_ :Any = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowerCAmelCase_ :str = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) ) lowerCAmelCase_ :Tuple = output.images[0] lowerCAmelCase_ :Any = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowerCAmelCase_ :Tuple = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self ) -> str: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :List[str] = torch_device == '''cpu''' lowerCAmelCase_ :str = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :List[str] = self.get_dummy_components() lowerCAmelCase_ :Any = self.pipeline_class(**_UpperCAmelCase ) lowerCAmelCase_ :Any = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowerCAmelCase_ :Union[str, Any] = 1 lowerCAmelCase_ :List[str] = 2 lowerCAmelCase_ :Any = self.get_dummy_inputs(_UpperCAmelCase ) for key in inputs.keys(): if key in self.batch_params: lowerCAmelCase_ :Dict = batch_size * [inputs[key]] lowerCAmelCase_ :List[str] = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" ) lowerCAmelCase_ :int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_img2img_out.npy""" ) lowerCAmelCase_ :List[str] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" ) lowerCAmelCase_ :List[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowerCAmelCase_ :List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) 
lowerCAmelCase_ :Optional[Any] = pipe( _UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
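# Editor's sketch of the image-to-3D pipeline exercised above; the dump's
# `ShapEImgaImgPipeline` is diffusers' ShapEImg2ImgPipeline. Parameters mirror
# the slow test; the input image path is illustrative.
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
image = load_image("corgi.png")  # any RGB image
frames = pipe(
    image, generator=torch.manual_seed(0), guidance_scale=3.0,
    num_inference_steps=64, frame_size=64, output_type="np",
).images[0]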
"""simple docstring""" import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Union[str, Any]: if isinstance(__A , __A ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden lowerCAmelCase_ :Tuple = deepcopy(__A ) elif os.path.exists(__A ): with io.open(__A , """r""" , encoding="""utf-8""" ) as f: lowerCAmelCase_ :str = json.load(__A ) else: try: lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" ) lowerCAmelCase_ :int = json.loads(__A ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) lowerCAmelCase_ :Optional[Any] = config self.set_stage_and_offload() def __lowerCAmelCase ( self ) -> Tuple: # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 ) # offload lowerCAmelCase_ :Dict = False if self.is_zeroa() or self.is_zeroa(): lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] ) lowerCAmelCase_ :Union[str, Any] = set( [ self.get_value("""zero_optimization.offload_optimizer.device""" ), self.get_value("""zero_optimization.offload_param.device""" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: lowerCAmelCase_ :Optional[int] = True def __lowerCAmelCase ( self , __A ) -> Optional[Any]: lowerCAmelCase_ :str = self.config # find the config node of interest if it exists lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" ) lowerCAmelCase_ :List[str] = nodes.pop() for node in nodes: lowerCAmelCase_ :Tuple = config.get(__A ) if config is None: return None, ds_key return config, ds_key def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A ) if config is None: return default return config.get(__A , __A ) def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]: lowerCAmelCase_ :Tuple = self.config # find the config node of interest if it exists lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" ) for node in nodes: lowerCAmelCase_ :int = config lowerCAmelCase_ :Any = config.get(__A ) if config is None: if must_exist: raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(__A ) def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = self.get_value(__A ) return False if value is None else bool(__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[str] = self.get_value(__A ) return False if value is None else not bool(__A ) def __lowerCAmelCase ( self ) -> str: return self._stage == 2 def __lowerCAmelCase ( self ) -> Union[str, Any]: return self._stage == 3 def __lowerCAmelCase ( self ) -> Union[str, Any]: return self._offload class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Optional[int]: lowerCAmelCase_ :Dict = engine def __lowerCAmelCase ( self 
, __A , **__A ) -> str: # runs backpropagation and handles mixed precision self.engine.backward(__A , **__A ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> List[str]: super().__init__(__A , device_placement=__A , scaler=__A ) lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" ) def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def __lowerCAmelCase ( self ) -> List[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def __lowerCAmelCase ( self ) -> int: if self.__has_overflow__: return self.optimizer.overflow return False class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A , __A ) -> Optional[int]: super().__init__(__A , __A ) def __lowerCAmelCase ( self ) -> Any: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]: lowerCAmelCase_ :str = params lowerCAmelCase_ :Any = lr lowerCAmelCase_ :List[Any] = weight_decay lowerCAmelCase_ :Any = kwargs class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]: lowerCAmelCase_ :Optional[int] = optimizer lowerCAmelCase_ :int = total_num_steps lowerCAmelCase_ :List[Any] = warmup_num_steps lowerCAmelCase_ :int = kwargs
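# How the dotted-key lookup above behaves; the obfuscated config class mirrors
# accelerate's HfDeepSpeedConfig, used here under that assumption with an
# illustrative config dict.
from accelerate.utils.deepspeed import HfDeepSpeedConfig

cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
assert cfg.get_value("zero_optimization.stage") == 3
assert cfg.get_value("zero_optimization.offload_param.device") == "cpu"
assert cfg.is_zero3() and cfg.is_offload()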
"""simple docstring""" __UpperCAmelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def _snake_case ( lowercase__ : bytes ) -> bytes: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowerCAmelCase_ :Tuple = f"""a bytes-like object is required, not \'{data.__class__.__name__}\'""" raise TypeError(__lowerCAmelCase ) lowerCAmelCase_ :int = """""".join(bin(__lowerCAmelCase )[2:].zfill(8 ) for byte in data ) lowerCAmelCase_ :Dict = len(__lowerCAmelCase ) % 6 != 0 if padding_needed: # The padding that will be added later lowerCAmelCase_ :List[Any] = b"""=""" * ((6 - len(__lowerCAmelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(__lowerCAmelCase ) % 6) else: lowerCAmelCase_ :Dict = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(__lowerCAmelCase ) , 6 ) ).encode() + padding ) def _snake_case ( lowercase__ : str ) -> bytes: '''simple docstring''' if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) and not isinstance(__lowerCAmelCase , __lowerCAmelCase ): lowerCAmelCase_ :Optional[int] = ( """argument should be a bytes-like object or ASCII string, """ f"""not \'{encoded_data.__class__.__name__}\'""" ) raise TypeError(__lowerCAmelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(__lowerCAmelCase , __lowerCAmelCase ): try: lowerCAmelCase_ :Tuple = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) lowerCAmelCase_ :Tuple = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(__lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowerCAmelCase_ :int = encoded_data[:-padding] lowerCAmelCase_ :int = """""".join( bin(B64_CHARSET.index(__lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowerCAmelCase_ :int = """""".join( bin(B64_CHARSET.index(__lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data ) lowerCAmelCase_ :Union[str, Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(__lowerCAmelCase ) , 8 ) ] return bytes(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined" UpperCAmelCase_ :List[Any] = "image_segmenter" UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation UpperCAmelCase_ :Tuple = ["image", "text"] UpperCAmelCase_ :Dict = ["image"] def __init__( self , *__A , **__A ) -> Optional[Any]: requires_backends(self , ["""vision"""] ) super().__init__(*__A , **__A ) def __lowerCAmelCase ( self , __A , __A ) -> Any: return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" ) def __lowerCAmelCase ( self , __A ) -> Tuple: with torch.no_grad(): lowerCAmelCase_ :Dict = self.model(**__A ).logits return logits def __lowerCAmelCase ( self , __A ) -> Tuple: lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy() lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :str = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( a__ ): UpperCAmelCase_ :List[Any] = ["pixel_values"] def __init__( self , __A = True , __A = 32 , __A=PILImageResampling.BILINEAR , __A = True , **__A , ) -> Optional[int]: lowerCAmelCase_ :Optional[Any] = do_resize lowerCAmelCase_ :List[str] = do_rescale lowerCAmelCase_ :Union[str, Any] = size_divisor lowerCAmelCase_ :Any = resample super().__init__(**_lowerCamelCase ) def __lowerCAmelCase ( self , __A , __A , __A , __A = None , **__A ) -> Tuple: lowerCAmelCase_ :Tuple = get_image_size(_lowerCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor lowerCAmelCase_ :Union[str, Any] = height // size_divisor * size_divisor lowerCAmelCase_ :Dict = width // size_divisor * size_divisor lowerCAmelCase_ :Any = resize(_lowerCamelCase , (new_h, new_w) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) return image def __lowerCAmelCase ( self , __A , __A , __A = None , **__A ) -> Optional[int]: return rescale(image=_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def __lowerCAmelCase ( self , __A , __A = None , __A = None , __A=None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> Tuple: lowerCAmelCase_ :Dict = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ :List[str] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ :str = size_divisor if size_divisor is not None else self.size_divisor lowerCAmelCase_ :Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError("""size_divisor is required for resizing""" ) lowerCAmelCase_ :Tuple = make_list_of_images(_lowerCamelCase ) if not valid_images(_lowerCamelCase ): raise ValueError("""Invalid image(s)""" ) # All transformations expect numpy arrays. lowerCAmelCase_ :Dict = [to_numpy_array(_lowerCamelCase ) for img in images] if do_resize: lowerCAmelCase_ :Tuple = [self.resize(_lowerCamelCase , size_divisor=_lowerCamelCase , resample=_lowerCamelCase ) for image in images] if do_rescale: lowerCAmelCase_ :str = [self.rescale(_lowerCamelCase , scale=1 / 255 ) for image in images] lowerCAmelCase_ :List[str] = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images] lowerCAmelCase_ :Union[str, Any] = {'''pixel_values''': images} return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
"""simple docstring""" def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int: '''simple docstring''' if index == number_of_items: return 0 lowerCAmelCase_ :Any = 0 lowerCAmelCase_ :str = 0 lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 ) if weights[index] <= max_weight: lowerCAmelCase_ :str = values[index] + knapsack( lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 ) return max(lowercase__ , lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _SCREAMING_SNAKE_CASE ( lowercase__ ): def __init__( self , *__A , __A=None , __A=None , **__A ) -> Dict: super().__init__(*_a , **_a ) lowerCAmelCase_ :int = eval_examples lowerCAmelCase_ :Optional[Any] = post_process_function def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = "eval" ) -> Any: lowerCAmelCase_ :Dict = self.eval_dataset if eval_dataset is None else eval_dataset lowerCAmelCase_ :Union[str, Any] = self.get_eval_dataloader(_a ) lowerCAmelCase_ :Tuple = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCAmelCase_ :int = self.compute_metrics lowerCAmelCase_ :List[str] = None lowerCAmelCase_ :Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCAmelCase_ :List[Any] = time.time() try: lowerCAmelCase_ :Optional[Any] = eval_loop( _a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , ) finally: lowerCAmelCase_ :List[Any] = compute_metrics lowerCAmelCase_ :Optional[Any] = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCAmelCase_ :List[Any] = self.post_process_function(_a , _a , output.predictions ) lowerCAmelCase_ :str = self.compute_metrics(_a ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): lowerCAmelCase_ :Optional[int] = metrics.pop(_a ) metrics.update(output.metrics ) else: lowerCAmelCase_ :Dict = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_a ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowerCAmelCase_ :Optional[int] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _a ) return metrics def __lowerCAmelCase ( self , __A , __A , __A=None , __A = "test" ) -> List[Any]: lowerCAmelCase_ :Optional[int] = self.get_test_dataloader(_a ) # Temporarily disable metric computation, we will do it in the loop here. 
lowerCAmelCase_ :int = self.compute_metrics lowerCAmelCase_ :Tuple = None lowerCAmelCase_ :Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCAmelCase_ :List[Any] = time.time() try: lowerCAmelCase_ :str = eval_loop( _a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , ) finally: lowerCAmelCase_ :Optional[int] = compute_metrics lowerCAmelCase_ :Any = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( _a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCAmelCase_ :Dict = self.post_process_function(_a , _a , output.predictions , """predict""" ) lowerCAmelCase_ :Optional[Any] = self.compute_metrics(_a ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): lowerCAmelCase_ :Tuple = metrics.pop(_a ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
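# Wiring sketch for the subclass above (it matches QuestionAnsweringTrainer
# from the transformers question-answering examples). Every lowercase name
# below is a stand-in the caller must provide: a model, TrainingArguments,
# tokenized features, raw examples, and the span post-processing callback.
trainer = QuestionAnsweringTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_features,
    eval_examples=eval_examples,
    post_process_function=post_processing_function,
    compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()   # runs the eval loop above, then turns logits into answer spans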
"""simple docstring""" from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]: '''simple docstring''' if not is_tqdm_available(): raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" ) lowerCAmelCase_ :Tuple = False if main_process_only: lowerCAmelCase_ :Dict = PartialState().local_process_index == 0 return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ : list[float] , lowercase__ : list[float] ) -> float: '''simple docstring''' lowerCAmelCase_ :Any = sorted(numsa + numsa ) lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = divmod(len(__snake_case ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = [float(x) for x in input('Enter the elements of first array: ').split()] __UpperCAmelCase = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json') class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = 0 def __lowerCAmelCase ( self ) -> List[str]: self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" ) os.makedirs(__A , exist_ok=__A ) with open(os.path.join(__A , """config.json""" ) , """w""" ) as f: f.write(json.dumps({} ) ) lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertEqual(type(__A ) , __A ) def __lowerCAmelCase ( self ) -> Optional[int]: try: AutoConfig.register("""custom""" , __A ) # Wrong model type will raise an error with self.assertRaises(__A ): AutoConfig.register("""model""" , __A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoConfig.register("""bert""" , __A ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCAmelCase_ :Union[str, Any] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def __lowerCAmelCase ( self ) -> Tuple: with self.assertRaisesRegex( __A , """bert-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" ) def __lowerCAmelCase ( self ) -> Any: with self.assertRaisesRegex( __A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" ) def __lowerCAmelCase ( self ) -> int: with self.assertRaisesRegex( __A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ): lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def __lowerCAmelCase ( self ) -> 
Tuple: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__A ): lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__A ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A ) self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" ) def __lowerCAmelCase ( self ) -> int: class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :int = "new-model" try: AutoConfig.register("""new-model""" , __A ) # If remote code is not set, the default is to use local lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
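# ---------------------------------------------------------------------------
# A minimal usage sketch of the register/save/reload round trip exercised by
# the test above. The names here (MyConfig, "my-model") are illustrative and
# not part of the test file; AutoConfig.register and save_pretrained are the
# public transformers APIs the test drives.
import tempfile

from transformers import AutoConfig, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-model"  # key the auto classes use to look this config up


AutoConfig.register("my-model", MyConfig)
with tempfile.TemporaryDirectory() as tmp_dir:
    MyConfig().save_pretrained(tmp_dir)
    # from_pretrained resolves the saved model_type back to the registered class
    assert isinstance(AutoConfig.from_pretrained(tmp_dir), MyConfig)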
1
0
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue):
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update
            # current time (kept as "+=" from the source; with non-zero
            # arrival times "=" would be the usual choice)
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        # all queues except the last one apply round robin with their time slice
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
356
"""simple docstring""" import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :str = GPTSanJapaneseTokenizer UpperCAmelCase_ :Optional[int] = False UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False} def __lowerCAmelCase ( self ) -> Tuple: super().setUp() # fmt: off lowerCAmelCase_ :Dict = ["""ใ“ใ‚“""", """ใ“ใ‚“ใซ""", """ใซใกใฏ""", """ใฐใ‚“ใฏ""", """ไธ–็•Œ,ใ”บ็•Œ""", """ใ€""", """ใ€‚""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # ๐Ÿ˜€ lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""} lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(__A ) ) def __lowerCAmelCase ( self , **__A ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> Dict: lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" return input_text, output_text def __lowerCAmelCase ( self , __A ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A ) lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A ) lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A ) return text, ids def __lowerCAmelCase ( self ) -> str: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> int: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚""" lowerCAmelCase_ :Any = ["""ใ“ใ‚“""", """ใซใกใฏ""", """ใ€""", """ไธ–็•Œ""", """ใ€‚""", """<SP>""", """ใ“ใ‚“""", """ใฐใ‚“ใฏ""", """ใ€""", """ใ”บ็•Œ""", """ใ€‚"""] lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids without special tokens lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids with special tokens lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token] lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( 
self ) -> Union[str, Any]: lowerCAmelCase_ :int = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚""" lowerCAmelCase_ :str = """ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :str = tokenizer.encode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Any = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text ) lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A ) lowerCAmelCase_ :int = tokenizer.decode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) lowerCAmelCase_ :Tuple = tokenizer.decode(__A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1) lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0] lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1) lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ„ใƒฏ""" , prefix_text="""ใ‚ใƒณ""" ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertNotEqual(__A , __A ) self.assertNotEqual(__A , __A ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = [["""ๆญฆ็”ฐไฟก็Ž„""", """ใฏใ€"""], ["""็น”็”ฐไฟก้•ท""", """ใฎ้…ไธ‹ใฎใ€"""]] lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A ) lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A ) # fmt: off lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]] lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], 
[1, 1, 1, 0, 0, 0, 0, 0]] lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , __A ) self.assertListEqual(x_token.token_type_ids , __A ) self.assertListEqual(x_token.attention_mask , __A ) self.assertListEqual(x_token_a.input_ids , __A ) self.assertListEqual(x_token_a.token_type_ids , __A ) self.assertListEqual(x_token_a.attention_mask , __A ) def __lowerCAmelCase ( self ) -> Tuple: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def __lowerCAmelCase ( self ) -> str: # tokenizer has no padding token pass
1
0
"""simple docstring""" import math def _snake_case ( lowercase__ : int ) -> List[Any]: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> Optional[int]: '''simple docstring''' try: lowerCAmelCase_ :List[Any] = int(lowerCamelCase_ ) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""" ) from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""" ) lowerCAmelCase_ :list[int] = [] lowerCAmelCase_ :Union[str, Any] = 2 while len(lowerCamelCase_ ) < nth: if is_prime(lowerCamelCase_ ): primes.append(lowerCamelCase_ ) num += 1 else: num += 1 return primes[len(lowerCamelCase_ ) - 1] if __name__ == "__main__": print(F"""{solution() = }""")
357
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __UpperCAmelCase = pd.read_csv( 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/' 'position_salaries.csv' ) __UpperCAmelCase = dataset.iloc[:, 1:2].values __UpperCAmelCase = dataset.iloc[:, 2].values __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0) __UpperCAmelCase = PolynomialFeatures(degree=4) __UpperCAmelCase = poly_reg.fit_transform(X) __UpperCAmelCase = LinearRegression() pol_reg.fit(X_poly, y) def _snake_case ( ) -> str: '''simple docstring''' plt.scatter(lowercase__ , lowercase__ , color="""red""" ) plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
1
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", """FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( 
FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
358
"""simple docstring""" from __future__ import annotations __UpperCAmelCase = 1.6021e-19 # units = C def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]: '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif conductivity < 0: raise ValueError("""Conductivity cannot be negative""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative""" ) elif mobility < 0: raise ValueError("""mobility cannot be negative""" ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _SCREAMING_SNAKE_CASE ( A_ ): UpperCAmelCase_ :Optional[int] = "" UpperCAmelCase_ :Optional[int] = "hf-legacy" # "hf://"" is reserved for hffs def __init__( self , __A = None , __A = None , **__A , ) -> Optional[int]: super().__init__(self , **snake_case__ ) lowerCAmelCase_ :List[Any] = repo_info lowerCAmelCase_ :Any = token lowerCAmelCase_ :int = None def __lowerCAmelCase ( self ) -> Any: if self.dir_cache is None: lowerCAmelCase_ :Union[str, Any] = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes lowerCAmelCase_ :Optional[int] = { "name": hf_file.rfilename, "size": None, "type": "file", } self.dir_cache.update( { str(snake_case__ ): {"""name""": str(snake_case__ ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def __lowerCAmelCase ( self , __A , __A = "rb" , **__A , ) -> Union[str, Any]: if not isinstance(self.repo_info , snake_case__ ): raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" ) lowerCAmelCase_ :List[str] = hf_hub_url(self.repo_info.id , snake_case__ , revision=self.repo_info.sha ) return fsspec.open( snake_case__ , mode=snake_case__ , headers=get_authentication_headers_for_url(snake_case__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def __lowerCAmelCase ( self , __A , **__A ) -> List[str]: self._get_dirs() lowerCAmelCase_ :int = self._strip_protocol(snake_case__ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(snake_case__ ) def __lowerCAmelCase ( self , __A , __A=False , **__A ) -> Optional[int]: self._get_dirs() lowerCAmelCase_ :Dict = PurePosixPath(path.strip("""/""" ) ) lowerCAmelCase_ :Dict = {} for p, f in self.dir_cache.items(): lowerCAmelCase_ :str = PurePosixPath(p.strip("""/""" ) ) lowerCAmelCase_ :Optional[Any] = p.parent if root == path: lowerCAmelCase_ :Optional[int] = f lowerCAmelCase_ :Optional[Any] = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
359
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *__A , **__A ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , __A , ) super().__init__(*__A , **__A )
1
0
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.json'} __UpperCAmelCase = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } __UpperCAmelCase = {'mgp-str': 27} class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): UpperCAmelCase_ :List[str] = VOCAB_FILES_NAMES UpperCAmelCase_ :List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __A , __A="[GO]" , __A="[GO]" , __A="[s]" , __A="[GO]" , **__A ) -> List[str]: super().__init__( unk_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , **_a , ) with open(_a , encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase_ :List[Any] = json.load(_a ) lowerCAmelCase_ :List[str] = {v: k for k, v in self.vocab.items()} @property def __lowerCAmelCase ( self ) -> List[Any]: return len(self.vocab ) def __lowerCAmelCase ( self ) -> Tuple: return dict(self.vocab , **self.added_tokens_encoder ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :Any = [] for s in text: char_tokens.extend(_a ) return char_tokens def __lowerCAmelCase ( self , __A ) -> Optional[Any]: return self.vocab.get(_a , self.vocab.get(self.unk_token ) ) def __lowerCAmelCase ( self , __A ) -> Any: return self.decoder.get(_a ) def __lowerCAmelCase ( self , __A , __A = None ) -> int: if not os.path.isdir(_a ): logger.error("""Vocabulary path ({}) should be a directory""".format(_a ) ) return lowerCAmelCase_ :Optional[Any] = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(_a , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_a , ensure_ascii=_a ) + """\n""" ) return (vocab_file,)
360
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame: '''simple docstring''' lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}""" lowerCAmelCase_ :List[str] = { """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", """Accept-Language""": """en-US, en;q=0.5""", } lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text ) # Initialize a Pandas dataframe with the column titles lowerCAmelCase_ :Union[str, Any] = DataFrame( columns=[ """Product Title""", """Product Link""", """Current Price of the product""", """Product Rating""", """MRP of the product""", """Discount""", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( """div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ): try: lowerCAmelCase_ :str = item.ha.text lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.ha.a["""href"""] lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text try: lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text except AttributeError: lowerCAmelCase_ :int = """Not available""" try: lowerCAmelCase_ :str = ( """โ‚น""" + item.find( """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""โ‚น""" )[1] ) except AttributeError: lowerCAmelCase_ :Optional[Any] = """""" try: lowerCAmelCase_ :str = float( ( ( float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) - float(product_price.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) / float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) * 1_0_0 ) except ValueError: lowerCAmelCase_ :Union[str, Any] = float("""nan""" ) except AttributeError: pass lowerCAmelCase_ :Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] lowerCAmelCase_ :List[Any] = """ """ lowerCAmelCase_ :Tuple = """ """ data_frame.index += 1 return data_frame if __name__ == "__main__": __UpperCAmelCase = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
1
0
"""simple docstring""" from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class _SCREAMING_SNAKE_CASE : UpperCAmelCase_ :Optional[Any] = 42 UpperCAmelCase_ :Optional[int] = None UpperCAmelCase_ :Any = None __UpperCAmelCase = namedtuple('CoinsDistribResult', 'moves excess') def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' if root is None: return 0 # Validation def count_nodes(lowercase__ : Optional[Any] ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(lowercase__ : List[Any] ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(lowercase__ ) != count_coins(lowercase__ ): raise ValueError("""The nodes number should be same as the number of coins""" ) # Main calculation def get_distrib(lowercase__ : Any ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) lowerCAmelCase_ :str = get_distrib(node.left ) lowerCAmelCase_ :str = get_distrib(node.right ) lowerCAmelCase_ :Optional[Any] = 1 - left_distrib_excess lowerCAmelCase_ :Tuple = 1 - right_distrib_excess lowerCAmelCase_ :Optional[Any] = ( left_distrib_moves + right_distrib_moves + abs(lowercase__ ) + abs(lowercase__ ) ) lowerCAmelCase_ :Tuple = node.data - coins_to_left - coins_to_right return CoinsDistribResult(lowercase__ , lowercase__ ) return get_distrib(lowercase__ )[0] if __name__ == "__main__": import doctest doctest.testmod()
361
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Any = """laion/clap-htsat-unfused""" lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp() def __lowerCAmelCase ( self , **__A ) -> List[Any]: return RobertaTokenizer.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self , **__A ) -> Tuple: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self ) -> int: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Optional[Any] = self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 ) lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Dict = self.get_feature_extractor() lowerCAmelCase_ :str = self.get_tokenizer() lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) ) lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" ) lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :List[Any] = """This is a test string""" lowerCAmelCase_ :Dict = processor(text=__A ) lowerCAmelCase_ :List[str] = tokenizer(__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :int = self.get_feature_extractor() lowerCAmelCase_ :Tuple = 
self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase_ :Tuple = processor.batch_decode(__A ) lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
1
0
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] ) -> int: '''simple docstring''' lowerCAmelCase_ :list[list[int]] = [[0 for _ in range(_lowerCamelCase )] for _ in range(m + 1 )] for i in range(m + 1 ): lowerCAmelCase_ :Tuple = 1 for n in range(m + 1 ): for k in range(1 , _lowerCamelCase ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: __UpperCAmelCase = int(input('Enter a number: ').strip()) print(partition(n)) except ValueError: print('Please enter a number.') else: try: __UpperCAmelCase = int(sys.argv[1]) print(partition(n)) except ValueError: print('Please pass a number.')
362
"""simple docstring""" import os from math import logaa def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int: '''simple docstring''' lowerCAmelCase_ :float = 0 lowerCAmelCase_ :Union[str, Any] = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ): lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) ) if x * logaa(lowercase__ ) > largest: lowerCAmelCase_ :Any = x * logaa(lowercase__ ) lowerCAmelCase_ :List[Any] = i + 1 return result if __name__ == "__main__": print(solution())
1
0
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging __UpperCAmelCase = logging.get_logger(__name__) logging.set_verbosity_info() def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[Any] ) -> int: '''simple docstring''' if "xprophetnet" in prophetnet_checkpoint_path: lowerCAmelCase_ :Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(a_ ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = XLMProphetNetForConditionalGeneration.from_pretrained( a_ , output_loading_info=a_ ) else: lowerCAmelCase_ :Optional[int] = ProphetNetForConditionalGenerationOld.from_pretrained(a_ ) lowerCAmelCase_ , lowerCAmelCase_ :Tuple = ProphetNetForConditionalGeneration.from_pretrained( a_ , output_loading_info=a_ ) lowerCAmelCase_ :Optional[int] = ["""key_proj""", """value_proj""", """query_proj"""] lowerCAmelCase_ :Tuple = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: lowerCAmelCase_ :Optional[Any] = key.split(""".""" ) if attributes[0] == "lm_head": lowerCAmelCase_ :List[Any] = prophet lowerCAmelCase_ :Any = prophet_old else: lowerCAmelCase_ :str = prophet.prophetnet lowerCAmelCase_ :Optional[Any] = prophet_old.model lowerCAmelCase_ :str = False for attribute in attributes: if attribute in mapping: lowerCAmelCase_ :Optional[Any] = mapping[attribute] if not hasattr(a_ , a_ ) and len(a_ ) > 0: lowerCAmelCase_ :Union[str, Any] = attribute elif hasattr(a_ , a_ ): lowerCAmelCase_ :Optional[int] = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" lowerCAmelCase_ :Dict = old_model.weight logger.info(f"""{attribute} is initialized.""" ) lowerCAmelCase_ :Union[str, Any] = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
lowerCAmelCase_ :str = old_model.bias logger.info(f"""{attribute} is initialized""" ) lowerCAmelCase_ :Union[str, Any] = True break elif attribute in special_keys and hasattr(a_ , """in_proj_weight""" ): lowerCAmelCase_ :Optional[Any] = old_model.in_proj_weight.shape[0] // 3 lowerCAmelCase_ :str = getattr(a_ , a_ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": lowerCAmelCase_ :int = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) lowerCAmelCase_ :List[str] = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": lowerCAmelCase_ :Dict = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) lowerCAmelCase_ :Dict = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": lowerCAmelCase_ :Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) lowerCAmelCase_ :Dict = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) lowerCAmelCase_ :Any = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings." lowerCAmelCase_ :Dict = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] ) lowerCAmelCase_ :List[Any] = True break if attribute.isdigit(): lowerCAmelCase_ :Optional[Any] = model[int(a_ )] lowerCAmelCase_ :Any = old_model[int(a_ )] else: lowerCAmelCase_ :Tuple = getattr(a_ , a_ ) if old_attribute == "": lowerCAmelCase_ :Union[str, Any] = old_model else: if not hasattr(a_ , a_ ): raise ValueError(f"""{old_model} does not have {old_attribute}""" ) lowerCAmelCase_ :Dict = getattr(a_ , a_ ) if not is_key_init: raise ValueError(f"""{key} was not correctly initialized!""" ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) prophet.save_pretrained(a_ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __UpperCAmelCase = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
363
"""simple docstring""" import itertools import math def _snake_case ( lowercase__ : int ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = 2 while True: if is_prime(lowercase__ ): yield num num += 1 def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> int: '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , lowercase__ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
1
0
"""simple docstring""" def _snake_case ( lowercase__ : list[int] , lowercase__ : int ) -> bool: '''simple docstring''' lowerCAmelCase_ :List[str] = len(__a ) lowerCAmelCase_ :Optional[int] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): lowerCAmelCase_ :Optional[Any] = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): lowerCAmelCase_ :Dict = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: lowerCAmelCase_ :Union[str, Any] = subset[i - 1][j] if arr[i - 1] <= j: lowerCAmelCase_ :Tuple = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
364
"""simple docstring""" def _snake_case ( lowercase__ : int = 5_0 ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"""{solution() = }""")
1
0
"""simple docstring""" import os def _snake_case ( lowercase__ : str = "input.txt" ) -> Any: '''simple docstring''' with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file: lowerCAmelCase_ :List[Any] = [ [int(lowerCAmelCase__ ) for element in line.split(""",""" )] for line in input_file.readlines() ] lowerCAmelCase_ :int = len(lowerCAmelCase__ ) lowerCAmelCase_ :str = len(matrix[0] ) lowerCAmelCase_ :List[str] = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )] for i in range(lowerCAmelCase__ ): lowerCAmelCase_ :str = matrix[i][0] for j in range(1 , lowerCAmelCase__ ): for i in range(lowerCAmelCase__ ): lowerCAmelCase_ :str = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , lowerCAmelCase__ ): lowerCAmelCase_ :Any = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): lowerCAmelCase_ :Optional[Any] = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F"""{solution() = }""")
365
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCAmelCase_ :List[Any] = CLIPTextModel(__A ) lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Union[str, Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Tuple = torch.manual_seed(__A ) else: lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A ) 
lowerCAmelCase_ :List[Any] = 2 lowerCAmelCase_ :int = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ) lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> int: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__A ): if isinstance(__A , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
lowerCAmelCase_ :str = CLIPTextModel(__A ) lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase_ :List[Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> str: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A ) else: lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A ) lowerCAmelCase_ :Optional[Any] = 2 lowerCAmelCase_ :Optional[int] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), ] lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[str] = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) lowerCAmelCase_ :Union[str, Any] = 1_0.0 lowerCAmelCase_ :Union[str, Any] = 4 lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A ) lowerCAmelCase_ :List[str] = steps lowerCAmelCase_ :int = scale lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0] lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = steps lowerCAmelCase_ :str = scale lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = steps lowerCAmelCase_ :Union[str, Any] = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = steps lowerCAmelCase_ :Tuple = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def __lowerCAmelCase ( self ) -> Dict: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> Optional[int]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :str = self.get_dummy_components() 
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__A ) except NotImplementedError: pass @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase_ :List[Any] = """evil space-punk bird""" lowerCAmelCase_ :List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) lowerCAmelCase_ :int = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) lowerCAmelCase_ :Union[str, Any] = pipe( __A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) lowerCAmelCase_ :Tuple = output.images[0] assert image.shape == (512, 512, 3) lowerCAmelCase_ :Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
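# A minimal usage sketch for the img2img ControlNet pipeline exercised in the slow test
# above. It reuses the same model ids, images, and call arguments; running it needs a
# CUDA GPU, the diffusers/transformers stack, and network access.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
)
pipe.enable_model_cpu_offload()

control_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
).resize((512, 512))
image = load_image(
    "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
).resize((512, 512))

generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
    "evil space-punk bird",
    image,
    control_image=control_image,
    generator=generator,
    output_type="np",
    num_inference_steps=50,
    strength=0.6,
)
print(output.images[0].shape)  # (512, 512, 3), as asserted in the slow test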
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :str = BarthezTokenizer UpperCAmelCase_ :List[Any] = BarthezTokenizerFast UpperCAmelCase_ :Optional[int] = True UpperCAmelCase_ :Optional[int] = True def __lowerCAmelCase ( self ) -> Dict: super().setUp() lowerCAmelCase_ :Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__ ) lowerCAmelCase_ :List[str] = tokenizer def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Tuple = "<pad>" lowerCAmelCase_ :Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(lowerCAmelCase__ ) , 10_1122 ) def __lowerCAmelCase ( self ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 ) @require_torch def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :int = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCAmelCase_ :Optional[int] = [0, 57, 3018, 7_0307, 91, 2] lowerCAmelCase_ :int = self.tokenizer( lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="""pt""" ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCAmelCase_ :str = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __lowerCAmelCase ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return lowerCAmelCase_ :Optional[int] = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = self.get_rust_tokenizer() lowerCAmelCase_ :Tuple = "I was born in 92000, and this is falsรฉ." 
lowerCAmelCase_ :Dict = tokenizer.tokenize(lowerCAmelCase__ ) lowerCAmelCase_ :Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase_ :Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) lowerCAmelCase_ :Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase_ :Optional[Any] = self.get_rust_tokenizer() lowerCAmelCase_ :Optional[Any] = tokenizer.encode(lowerCAmelCase__ ) lowerCAmelCase_ :Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Optional[Any] = {"input_ids": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCAmelCase_ :Tuple = [ "Le transformeur est un modรจle d'apprentissage profond introduit en 2017, " "utilisรฉ principalement dans le domaine du traitement automatique des langues (TAL).", "ร€ l'instar des rรฉseaux de neurones rรฉcurrents (RNN), les transformeurs sont conรงus " "pour gรฉrer des donnรฉes sรฉquentielles, telles que le langage naturel, pour des tรขches " "telles que la traduction et la synthรจse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=lowerCAmelCase__ , )
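# Hedged usage sketch mirroring the slow/fast parity checks above (it downloads the real
# moussaKam/mbarthez tokenizer, so it needs network access):
from transformers import BarthezTokenizer, BarthezTokenizerFast

slow_tok = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
fast_tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
sentence = "I was born in 92000, and this is falsé."
assert slow_tok.tokenize(sentence) == fast_tok.tokenize(sentence)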
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ): UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :List[str] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" f""" `n_embd`: {n_embd} are not equal.""" ) lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim lowerCAmelCase_ :str = prefix_hidden_dim lowerCAmelCase_ :str = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :List[Any] = ( nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :Any = GPTaConfig( vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , ) lowerCAmelCase_ :Any = GPTaLMHeadModel(__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]: lowerCAmelCase_ :str = self.transformer.transformer.wte(__A ) lowerCAmelCase_ :Any = self.encode_prefix(__A ) lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A ) lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor: return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: return self.encode_prefix(__A ) @torch.no_grad() def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]: lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 ) lowerCAmelCase_ :Optional[int] = [] lowerCAmelCase_ :List[str] = [] for feature in features: lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature # Only support beam search for now lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam( input_embeds=__A , device=__A , eos_token_id=__A ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) lowerCAmelCase_ :Tuple = torch.stack(__A ) lowerCAmelCase_ :int = torch.stack(__A ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, 
Any]: lowerCAmelCase_ :Optional[int] = eos_token_id lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int ) lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool ) if input_embeds is not None: lowerCAmelCase_ :List[str] = input_embeds else: lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A ) for i in range(__A ): lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A ) lowerCAmelCase_ :str = outputs.logits lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) lowerCAmelCase_ :Dict = logits.softmax(-1 ).log() if scores is None: lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 ) lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: lowerCAmelCase_ :List[str] = next_tokens else: lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] ) lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 ) else: lowerCAmelCase_ :List[Any] = -float(np.inf ) lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Optional[int] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None] lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 ) lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1] lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source] lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1] lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 ) lowerCAmelCase_ :str = tokens[next_tokens_source] lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 ) lowerCAmelCase_ :Dict = generated[next_tokens_source] lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source] lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 ) lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze() if is_stopped.all(): break lowerCAmelCase_ :str = scores / seq_lengths lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A ) # tokens tensors are already padded to max_seq_length lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order] lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 ) lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
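# Illustrative sketch of the beam re-ranking arithmetic used in generate_beam above
# (tiny hand-made numbers, not model outputs): scores_sum accumulates per-beam log-probs,
# seq_lengths normalises them, and topk over the flattened (beam x vocab) average picks
# the next beams; // and % recover the parent beam and the appended token id.
import torch

beams = 2
scores_sum = torch.tensor([[0.1, 0.5, 0.2], [0.4, 0.3, 0.6]])  # per-beam, per-token totals
seq_lengths = torch.tensor([2.0, 3.0])
scores_sum_average = scores_sum / seq_lengths[:, None]          # length-normalised scores
values, next_tokens = scores_sum_average.view(-1).topk(beams, -1)
next_tokens_source = next_tokens // scores_sum.shape[1]         # which beam each pick extends
token_ids = next_tokens % scores_sum.shape[1]                   # which token it appends
print(next_tokens_source.tolist(), token_ids.tolist())          # [0, 1] [1, 2]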
"""simple docstring""" def _snake_case ( lowercase__ : str , lowercase__ : str = " " ) -> list: '''simple docstring''' lowerCAmelCase_ :str = [] lowerCAmelCase_ :Dict = 0 for index, char in enumerate(_UpperCamelCase ): if char == separator: split_words.append(string[last_index:index] ) lowerCAmelCase_ :Optional[Any] = index + 1 elif index + 1 == len(_UpperCamelCase ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "detr" UpperCAmelCase_ :str = ["past_key_values"] UpperCAmelCase_ :Tuple = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(__A , __A ): lowerCAmelCase_ :str = backbone_config.get("""model_type""" ) lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A ) # set timm attributes to None lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None lowerCAmelCase_ :Tuple = use_timm_backbone lowerCAmelCase_ :Optional[int] = backbone_config lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :int = num_queries lowerCAmelCase_ :List[Any] = d_model lowerCAmelCase_ :Optional[int] = encoder_ffn_dim lowerCAmelCase_ :Tuple = encoder_layers lowerCAmelCase_ :int = encoder_attention_heads lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim lowerCAmelCase_ :List[str] = decoder_layers lowerCAmelCase_ :Dict = decoder_attention_heads lowerCAmelCase_ :Dict = dropout lowerCAmelCase_ :Tuple = attention_dropout lowerCAmelCase_ :Union[str, Any] = activation_dropout lowerCAmelCase_ :Any = activation_function lowerCAmelCase_ :List[str] = init_std lowerCAmelCase_ :Optional[int] = init_xavier_std lowerCAmelCase_ :int = encoder_layerdrop lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop lowerCAmelCase_ :List[str] = encoder_layers lowerCAmelCase_ :Union[str, Any] = auxiliary_loss lowerCAmelCase_ :str = position_embedding_type lowerCAmelCase_ :List[Any] = backbone lowerCAmelCase_ :str = use_pretrained_backbone lowerCAmelCase_ :str = dilation # Hungarian matcher lowerCAmelCase_ :List[Any] = class_cost lowerCAmelCase_ :Union[str, Any] = bbox_cost lowerCAmelCase_ :Tuple = giou_cost # Loss coefficients lowerCAmelCase_ :Optional[int] = mask_loss_coefficient lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient lowerCAmelCase_ :Tuple = bbox_loss_coefficient lowerCAmelCase_ :Tuple = giou_loss_coefficient lowerCAmelCase_ :Dict = eos_coefficient super().__init__(is_encoder_decoder=__A , **__A ) @property def __lowerCAmelCase ( self ) -> int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) -> int: return self.d_model @classmethod 
def __lowerCAmelCase ( cls , __A , **__A ) -> Any: return cls(backbone_config=__A , **__A ) def __lowerCAmelCase ( self ) -> Dict[str, any]: lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowerCAmelCase_ :Dict = self.backbone_config.to_dict() lowerCAmelCase_ :str = self.__class__.model_type return output class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[Any] = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-5 @property def __lowerCAmelCase ( self ) -> int: return 12
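# Hedged usage sketch for the config class above (its public name in transformers is
# DetrConfig; the overridden values are illustrative):
from transformers import DetrConfig

config = DetrConfig(num_queries=50, encoder_layers=4, decoder_layers=4)
# the attribute_map aliases defined above resolve to d_model / encoder_attention_heads
print(config.hidden_size, config.num_attention_heads)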
"""simple docstring""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class _SCREAMING_SNAKE_CASE ( a__ ): UpperCAmelCase_ :Union[str, Any] = 'encodec' def __init__( self , __A=[1.5, 3.0, 6.0, 12.0, 24.0] , __A=2_4000 , __A=1 , __A=False , __A=None , __A=None , __A=128 , __A=32 , __A=1 , __A=[8, 5, 4, 2] , __A="weight_norm" , __A=7 , __A=7 , __A=3 , __A=2 , __A=True , __A="reflect" , __A=2 , __A=2 , __A=1.0 , __A=1024 , __A=None , __A=True , **__A , ) -> Dict: lowerCAmelCase_ :List[Any] = target_bandwidths lowerCAmelCase_ :List[str] = sampling_rate lowerCAmelCase_ :List[Any] = audio_channels lowerCAmelCase_ :Optional[int] = normalize lowerCAmelCase_ :int = chunk_length_s lowerCAmelCase_ :Optional[int] = overlap lowerCAmelCase_ :int = hidden_size lowerCAmelCase_ :Tuple = num_filters lowerCAmelCase_ :Any = num_residual_layers lowerCAmelCase_ :Optional[Any] = upsampling_ratios lowerCAmelCase_ :Any = norm_type lowerCAmelCase_ :Any = kernel_size lowerCAmelCase_ :int = last_kernel_size lowerCAmelCase_ :int = residual_kernel_size lowerCAmelCase_ :Union[str, Any] = dilation_growth_rate lowerCAmelCase_ :Optional[int] = use_causal_conv lowerCAmelCase_ :List[Any] = pad_mode lowerCAmelCase_ :Tuple = compress lowerCAmelCase_ :int = num_lstm_layers lowerCAmelCase_ :str = trim_right_ratio lowerCAmelCase_ :Tuple = codebook_size lowerCAmelCase_ :List[str] = codebook_dim if codebook_dim is not None else hidden_size lowerCAmelCase_ :int = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" ) super().__init__(**_lowerCamelCase ) @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __lowerCAmelCase ( self ) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :List[Any] = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def __lowerCAmelCase ( self ) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['DeiTFeatureExtractor'] __UpperCAmelCase = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> List[str]: # we need a list not a string, so do something to change the type lowerCAmelCase_ :Any = arr.split(""",""" ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[Any] = [int(self.array[0] )] * len(self.array ) lowerCAmelCase_ :Union[str, Any] = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): lowerCAmelCase_ :Optional[int] = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) lowerCAmelCase_ :int = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": __UpperCAmelCase = input('please input some numbers:') __UpperCAmelCase = SubArray(whole_array) __UpperCAmelCase = array.solve_sub_array() print(('the results is:', re))
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = "โ–" __UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"} __UpperCAmelCase = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", } } __UpperCAmelCase = { "facebook/xglm-564M": 20_48, } class _SCREAMING_SNAKE_CASE ( _UpperCAmelCase ): UpperCAmelCase_ :int = VOCAB_FILES_NAMES UpperCAmelCase_ :List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ :List[str] = ["input_ids", "attention_mask"] def __init__( self , __A , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A = None , **__A , ) -> List[str]: lowerCAmelCase_ :Dict = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowerCAmelCase_ :List[str] = 7 lowerCAmelCase_ :Union[str, Any] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )] lowerCAmelCase_ :Optional[Any] = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) lowerCAmelCase_ :Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase_ ) ) lowerCAmelCase_ :str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | 'โ–' | 's' | 'โ–de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| 'โ–' | 's' | 'โ–de' | '-' | 'โ–a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCAmelCase_ :Optional[Any] = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowerCAmelCase_ :int = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} lowerCAmelCase_ :Dict = len(self.sp_model ) lowerCAmelCase_ :Dict = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(lowercase_ ) lowerCAmelCase_ :Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = self.__dict__.copy() lowerCAmelCase_ :int = None lowerCAmelCase_ :Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , __A ) -> int: lowerCAmelCase_ :int = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase_ :Union[str, Any] = {} lowerCAmelCase_ :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCAmelCase ( self , __A , __A = None ) -> str: if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowerCAmelCase_ :str = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> Optional[Any]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ ) if token_ids_a is None: return [1] + ([0] * len(lowercase_ )) return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) def __lowerCAmelCase ( self , __A , __A = None ) -> str: lowerCAmelCase_ :Tuple = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __lowerCAmelCase ( self ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :str = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: return self.sp_model.encode(lowercase_ , out_type=lowercase_ ) def __lowerCAmelCase ( self , __A ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCAmelCase_ :int = self.sp_model.PieceToId(lowercase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :str = """""".join(lowercase_ ).replace(lowercase_ , """ """ ).strip() return out_string def __lowerCAmelCase ( self , __A , __A = None ) -> Optional[int]: if not os.path.isdir(lowercase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase_ :Optional[int] = os.path.join( lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): 
copyfile(self.vocab_file , lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_ , """wb""" ) as fi: lowerCAmelCase_ :List[Any] = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,)
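# Hedged usage sketch for the fairseq alignment documented above (it downloads the real
# facebook/xglm-564M SentencePiece model, so it needs network access):
from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
assert tok.convert_tokens_to_ids("<s>") == 0
assert tok.convert_tokens_to_ids("<pad>") == 1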
"""simple docstring""" __UpperCAmelCase = 2_56 # Modulus to hash a string __UpperCAmelCase = 1_00_00_03 def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool: '''simple docstring''' lowerCAmelCase_ :Tuple = len(lowercase__ ) lowerCAmelCase_ :List[str] = len(lowercase__ ) if p_len > t_len: return False lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :Optional[int] = 0 lowerCAmelCase_ :Any = 1 # Calculating the hash of pattern and substring of text for i in range(lowercase__ ): lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash lowerCAmelCase_ :Any = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def _snake_case ( ) -> None: '''simple docstring''' lowerCAmelCase_ :int = """abc1abc12""" lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc""" lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc""" assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ ) # Test 2) lowerCAmelCase_ :Dict = """ABABX""" lowerCAmelCase_ :int = """ABABZABABYABABX""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 3) lowerCAmelCase_ :Union[str, Any] = """AAAB""" lowerCAmelCase_ :List[str] = """ABAAAAAB""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 4) lowerCAmelCase_ :Dict = """abcdabcy""" lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy""" assert rabin_karp(lowercase__ , lowercase__ ) # Test 5) lowerCAmelCase_ :Optional[int] = """Lรผ""" lowerCAmelCase_ :Optional[int] = """Lรผsai""" assert rabin_karp(lowercase__ , lowercase__ ) lowerCAmelCase_ :Optional[int] = """Lue""" assert not rabin_karp(lowercase__ , lowercase__ ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=True , __A=False , __A=False , __A=False , __A=2 , __A=99 , __A=0 , __A=32 , __A=5 , __A=4 , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=2 , __A=4 , __A="last" , __A=True , __A=None , __A=0 , ) -> Union[str, Any]: lowerCAmelCase_ :str = parent lowerCAmelCase_ :List[str] = batch_size lowerCAmelCase_ :Any = seq_length lowerCAmelCase_ :Any = is_training lowerCAmelCase_ :List[Any] = use_input_lengths lowerCAmelCase_ :Optional[Any] = use_token_type_ids lowerCAmelCase_ :Optional[Any] = use_labels lowerCAmelCase_ :List[Any] = gelu_activation lowerCAmelCase_ :Union[str, Any] = sinusoidal_embeddings lowerCAmelCase_ :Optional[Any] = causal lowerCAmelCase_ :List[str] = asm lowerCAmelCase_ :Dict = n_langs lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :str = n_special lowerCAmelCase_ :Tuple = hidden_size lowerCAmelCase_ :Optional[Any] = num_hidden_layers lowerCAmelCase_ :List[Any] = num_attention_heads lowerCAmelCase_ :int = hidden_dropout_prob lowerCAmelCase_ :str = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = max_position_embeddings lowerCAmelCase_ :List[str] = type_sequence_label_size lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :Optional[Any] = num_labels lowerCAmelCase_ :List[Any] = num_choices lowerCAmelCase_ :int = summary_type lowerCAmelCase_ :Union[str, Any] = use_proj lowerCAmelCase_ :Any = scope lowerCAmelCase_ :Optional[int] = bos_token_id def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ :Dict = None if self.use_input_lengths: lowerCAmelCase_ :Dict = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCAmelCase_ :Any = None if self.use_token_type_ids: lowerCAmelCase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCAmelCase_ :Optional[Any] = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :List[str] = None if self.use_labels: lowerCAmelCase_ :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ :List[Any] = ids_tensor([self.batch_size] , 2 ).float() lowerCAmelCase_ :str = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ :List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __lowerCAmelCase ( self ) -> List[str]: return XLMConfig( vocab_size=self.vocab_size , 
n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> List[str]: lowerCAmelCase_ :Optional[int] = XLMModel(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ :Dict = model(_a , lengths=_a , langs=_a ) lowerCAmelCase_ :Tuple = model(_a , langs=_a ) lowerCAmelCase_ :Any = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Optional[int]: lowerCAmelCase_ :Tuple = XLMWithLMHeadModel(_a ) model.to(_a ) model.eval() lowerCAmelCase_ :List[Any] = model(_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> List[Any]: lowerCAmelCase_ :List[Any] = XLMForQuestionAnsweringSimple(_a ) model.to(_a ) model.eval() lowerCAmelCase_ :Any = model(_a ) lowerCAmelCase_ :Dict = model(_a , start_positions=_a , end_positions=_a ) lowerCAmelCase_ :Optional[int] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = XLMForQuestionAnswering(_a ) model.to(_a ) model.eval() lowerCAmelCase_ :Dict = model(_a ) lowerCAmelCase_ :Tuple = model( _a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , p_mask=_a , ) lowerCAmelCase_ :List[str] = model( _a , start_positions=_a , end_positions=_a , cls_index=_a , is_impossible=_a , ) ((lowerCAmelCase_ ) , ) :Union[str, Any] = result_with_labels.to_tuple() lowerCAmelCase_ :int = model(_a , start_positions=_a , end_positions=_a ) ((lowerCAmelCase_ ) , ) :Tuple = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> int: lowerCAmelCase_ :List[Any] = XLMForSequenceClassification(_a ) model.to(_a ) model.eval() lowerCAmelCase_ :Union[str, Any] = model(_a ) lowerCAmelCase_ :Tuple = model(_a , labels=_a ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Any: lowerCAmelCase_ :Optional[int] = self.num_labels lowerCAmelCase_ :int = XLMForTokenClassification(_a ) model.to(_a ) model.eval() lowerCAmelCase_ :Union[str, Any] = model(_a , attention_mask=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Tuple: lowerCAmelCase_ :List[Any] = self.num_choices lowerCAmelCase_ :Optional[Any] = XLMForMultipleChoice(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ :Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ :List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ :str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ :Any = model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) :Optional[Any] = config_and_inputs lowerCAmelCase_ :int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): UpperCAmelCase_ :List[Any] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCAmelCase_ :Tuple = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCAmelCase_ :Dict = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A ) -> Optional[int]: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __lowerCAmelCase ( self , __A , __A , __A=False ) -> str: lowerCAmelCase_ :List[Any] = super()._prepare_for_class(_a , _a , return_labels=_a ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowerCAmelCase_ :List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_a ) lowerCAmelCase_ :Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_a ) return inputs_dict def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[Any] = XLMModelTester(self ) lowerCAmelCase_ :Optional[int] = ConfigTester(self , config_class=_a , emb_dim=37 ) def __lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_a ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_a ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_a ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_a ) def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_a ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_a ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_a ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A=False , __A=1 ) -> List[str]: self.assertIsInstance(_a , _a ) self.assertListEqual( [isinstance(_a , _a ) for iter_attentions in attentions] , [True] * len(_a ) ) self.assertEqual(len(_a ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_a ): # adds PAD dummy token lowerCAmelCase_ :int = min_length + idx + 1 lowerCAmelCase_ :Dict = min_length + idx + 1 lowerCAmelCase_ :str = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_a ) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A=False , __A=1 ) -> str: self.assertIsInstance(_a , _a ) self.assertListEqual( [isinstance(_a , _a ) for iter_hidden_states in hidden_states] , [True] * len(_a ) , ) self.assertEqual(len(_a ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_a ): # adds PAD dummy token lowerCAmelCase_ :int = min_length + idx + 1 lowerCAmelCase_ :Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_a ) , ) pass @slow def __lowerCAmelCase ( self ) -> Dict: for model_name 
in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ :Optional[Any] = XLMModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(_a ) lowerCAmelCase_ :Optional[Any] = torch.tensor([[14, 447]] , dtype=torch.long , device=_a ) # the president lowerCAmelCase_ :List[str] = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowerCAmelCase_ :List[Any] = model.generate(_a , do_sample=_a ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _a )
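# Hedged sketch mirroring the slow generation check above (it downloads the large
# xlm-mlm-en-2048 checkpoint; as the test comment notes, greedy decoding from
# "the president" simply repeats the bigram):
import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].tolist())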
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str: '''simple docstring''' lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : int ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase_ :Optional[Any] = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase_ :List[Any] = 1_6 elif accelerator.mixed_precision != "no": lowerCAmelCase_ :List[str] = 8 else: lowerCAmelCase_ :Optional[int] = None return tokenizer.pad( lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCAmelCase_ :Optional[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :List[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __UpperCAmelCase = mocked_dataloaders # noqa: F811 def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1": lowerCAmelCase_ :Optional[Any] = 2 # New Code # lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps ) lowerCAmelCase_ :int = int(args.local_sgd_steps ) # Initialize accelerator lowerCAmelCase_ :str = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :int = config["""lr"""] lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :int = int(config["""seed"""] ) lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ ) # Instantiate scheduler lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Now we train the model for epoch in range(lowercase__ ): model.train() with LocalSGD( accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(lowercase__ ): lowerCAmelCase_ :str = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = output.loss accelerator.backward(lowercase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) def _snake_case ( ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument( """--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowerCAmelCase_ :Optional[Any] = parser.parse_args() lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
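# Hedged launch sketch for the training script above (the file name is hypothetical;
# the flags map to the argparse options defined in main()):
#
#   accelerate launch local_sgd_training.py --mixed_precision fp16 \
#       --gradient_accumulation_steps 2 --local_sgd_steps 8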
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' def wrapper(*lowercase__ : Optional[int] , **lowercase__ : List[str] ): lowerCAmelCase_ :Dict = timeit.default_timer() lowerCAmelCase_ :Any = func(*__lowerCamelCase , **__lowerCamelCase ) lowerCAmelCase_ :Dict = timeit.default_timer() - starttime return delta lowerCAmelCase_ :Dict = func.__name__ return wrapper def _snake_case ( lowercase__ : Optional[int] , lowercase__ : List[str]=1_0_0 , lowercase__ : Dict=None ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :Dict = [] lowerCAmelCase_ :List[str] = seq_shapes or {} for i in range(__lowerCamelCase ): lowerCAmelCase_ :List[str] = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(__lowerCamelCase , _ArrayXD ): lowerCAmelCase_ :int = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(__lowerCamelCase , datasets.Value ): if v.dtype == "string": lowerCAmelCase_ :Union[str, Any] = "The small grey turtle was surprisingly fast when challenged." else: lowerCAmelCase_ :Dict = np.random.randint(1_0 , size=1 ).astype(v.dtype ).item() elif isinstance(__lowerCamelCase , datasets.Sequence ): while isinstance(__lowerCamelCase , datasets.Sequence ): lowerCAmelCase_ :Dict = v.feature lowerCAmelCase_ :str = seq_shapes[k] lowerCAmelCase_ :List[Any] = np.random.rand(*__lowerCamelCase ).astype(v.dtype ) lowerCAmelCase_ :Optional[Any] = data dummy_data.append((i, example) ) return dummy_data def _snake_case ( lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int=1_0_0 , lowercase__ : Any=None ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :List[Any] = generate_examples(__lowerCamelCase , num_examples=__lowerCamelCase , seq_shapes=__lowerCamelCase ) with ArrowWriter(features=__lowerCamelCase , path=__lowerCamelCase ) as writer: for key, record in dummy_data: lowerCAmelCase_ :Optional[int] = features.encode_example(__lowerCamelCase ) writer.write(__lowerCamelCase ) lowerCAmelCase_ :Dict = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" ) lowerCAmelCase_ :Union[str, Any] = datasets.Dataset.from_file(filename=__lowerCamelCase , info=datasets.DatasetInfo(features=__lowerCamelCase ) ) return dataset
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : List[str] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase_ :str = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" ) return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. lowerCAmelCase_ :Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :Any = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]: '''simple docstring''' model.eval() lowerCAmelCase_ :Dict = 0 for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowercase__ ) - 1: lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Tuple = metric.compute() return eval_metric["accuracy"] def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any: '''simple docstring''' lowerCAmelCase_ :Optional[int] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :int = config["""lr"""] lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :Optional[int] = int(config["""seed"""] ) lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase_ :Optional[Any] = args.model_name_or_path set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ ) # Instantiate optimizer lowerCAmelCase_ :List[str] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowerCAmelCase_ :Any = 1 lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , ) else: lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase_ :List[str] = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase_ :List[Any] = 0 lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" ) lowerCAmelCase_ :Optional[Any] = num_epochs if args.partial_train_epoch is not None: lowerCAmelCase_ :Dict = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1] lowerCAmelCase_ :int = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1 lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) accelerator.print("""resumed checkpoint performance:""" , lowercase__ ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f: lowerCAmelCase_ :List[str] = json.load(lowercase__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model lowerCAmelCase_ :List[Any] = {} for epoch in range(lowercase__ , lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Dict = outputs.loss lowerCAmelCase_ :int = loss / gradient_accumulation_steps accelerator.backward(lowercase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 lowerCAmelCase_ :List[str] = f"""epoch_{epoch}""" lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ ) accelerator.save_state(lowercase__ ) lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowerCAmelCase_ :Union[str, Any] = accuracy lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0] lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""] lowerCAmelCase_ :List[Any] = epoch lowerCAmelCase_ :Tuple = overall_step accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) def _snake_case ( ) -> int: '''simple docstring''' lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , ) parser.add_argument( 
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , ) lowerCAmelCase_ :Optional[int] = parser.parse_args() lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def _snake_case ( ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :int = torch.nn.Linear(2 , 4 ) lowerCAmelCase_ :Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) lowerCAmelCase_ :Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) lowerCAmelCase_ :List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) lowerCAmelCase_ :Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def _snake_case ( lowercase__ : str ) -> Optional[Any]: '''simple docstring''' return (model.weight.abs().sum() + model.bias.abs().sum()).item() def _snake_case ( lowercase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _SCREAMING_SNAKE_CASE ( __snake_case ): @require_cuda def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): lowerCAmelCase_ :Any = Accelerator(cpu=a_ ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[int] = Accelerator() lowerCAmelCase_ :Optional[int] = GradientState() assert state.num_steps == 1 lowerCAmelCase_ :str = 4 assert state.num_steps == 4 assert state.sync_gradients is True lowerCAmelCase_ :List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Optional[Any] = Accelerator() lowerCAmelCase_ :Optional[Any] = create_components() ( lowerCAmelCase_ ) :Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Tuple = Accelerator() lowerCAmelCase_ :Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def __lowerCAmelCase ( self ) -> List[Any]: PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*__A , **__A ): pass with patch("""torch.cuda.set_device""" , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ): lowerCAmelCase_ :List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , """cuda:64""" ) def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Optional[int] = Accelerator() lowerCAmelCase_ :str = 
create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) lowerCAmelCase_ :Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :int = Accelerator() lowerCAmelCase_ :str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) lowerCAmelCase_ :List[Any] = get_signature(a_ ) # saving hook def save_config(__A , __A , __A ): lowerCAmelCase_ :Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , """data.json""" ) , """w""" ) as f: json.dump(a_ , a_ ) # loading hook def load_config(__A , __A ): with open(os.path.join(a_ , """data.json""" ) , """r""" ) as f: lowerCAmelCase_ :Any = json.load(a_ ) lowerCAmelCase_ :List[str] = config['''class_name'''] lowerCAmelCase_ :str = accelerator.register_save_state_pre_hook(a_ ) lowerCAmelCase_ :Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded lowerCAmelCase_ :Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded lowerCAmelCase_ :Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :List[Any] = Accelerator() lowerCAmelCase_ :Tuple = create_components() lowerCAmelCase_ :Union[str, Any] = None # This should work lowerCAmelCase_ :Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :str = Accelerator() lowerCAmelCase_ :Optional[Any] = create_components() lowerCAmelCase_ :Optional[int] = [1, 2, 3] # This should work lowerCAmelCase_ :str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , """_is_accelerate_prepared""" , a_ ) , a_ , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , ) self.assertEqual( getattr(a_ , """_is_accelerate_prepared""" , a_ ) , a_ , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(a_ , """_is_accelerate_prepared""" , a_ ) , a_ , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(a_ , """_is_accelerate_prepared""" , a_ ) , a_ , """Scheduler is missing 
`_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(a_ , """_is_accelerate_prepared""" , a_ ) , a_ , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(a_ , """_is_accelerate_prepared""" , a_ ) , a_ , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , ) @slow @require_bnb def __lowerCAmelCase ( self ) -> Optional[Any]: from transformers import AutoModelForCausalLM lowerCAmelCase_ :Dict = AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , load_in_abit=a_ , device_map={"""""": 0} , ) lowerCAmelCase_ :Optional[Any] = Accelerator() # This should work lowerCAmelCase_ :Any = accelerator.prepare(a_ ) @slow @require_bnb def __lowerCAmelCase ( self ) -> int: from transformers import AutoModelForCausalLM lowerCAmelCase_ :Any = Accelerator() with init_empty_weights(): lowerCAmelCase_ :List[str] = AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , ) model.tie_weights() lowerCAmelCase_ :Union[str, Any] = infer_auto_device_map(a_ ) lowerCAmelCase_ :str = '''cpu''' lowerCAmelCase_ :Optional[int] = AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): lowerCAmelCase_ :Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def __lowerCAmelCase ( self ) -> List[str]: from transformers import AutoModelForCausalLM lowerCAmelCase_ :str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): lowerCAmelCase_ :Any = AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , ) model.tie_weights() lowerCAmelCase_ :List[Any] = infer_auto_device_map(a_ ) lowerCAmelCase_ :Dict = 1 lowerCAmelCase_ :str = AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , load_in_abit=a_ , device_map=a_ , ) lowerCAmelCase_ :Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): lowerCAmelCase_ :Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def __lowerCAmelCase ( self ) -> Union[str, Any]: from transformers import AutoModelForCausalLM with init_empty_weights(): lowerCAmelCase_ :Dict = AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , ) lowerCAmelCase_ :Tuple = infer_auto_device_map(a_ ) lowerCAmelCase_ :Tuple = 1 lowerCAmelCase_ :List[Any] = AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , load_in_abit=a_ , device_map=a_ , ) lowerCAmelCase_ :Tuple = Accelerator() # This should work lowerCAmelCase_ :Dict = accelerator.prepare(a_ ) @require_cuda def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :List[Any] = torch.nn.Linear(10 , 10 ) lowerCAmelCase_ :List[str] = torch.optim.SGD(model.parameters() , lr=0.0_1 ) lowerCAmelCase_ :Optional[Any] = Accelerator(cpu=a_ ) lowerCAmelCase_ :str = accelerator.prepare(a_ )
"""simple docstring""" import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Union[str, Any]: if isinstance(__A , __A ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden lowerCAmelCase_ :Tuple = deepcopy(__A ) elif os.path.exists(__A ): with io.open(__A , """r""" , encoding="""utf-8""" ) as f: lowerCAmelCase_ :str = json.load(__A ) else: try: lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" ) lowerCAmelCase_ :int = json.loads(__A ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) lowerCAmelCase_ :Optional[Any] = config self.set_stage_and_offload() def __lowerCAmelCase ( self ) -> Tuple: # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 ) # offload lowerCAmelCase_ :Dict = False if self.is_zeroa() or self.is_zeroa(): lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] ) lowerCAmelCase_ :Union[str, Any] = set( [ self.get_value("""zero_optimization.offload_optimizer.device""" ), self.get_value("""zero_optimization.offload_param.device""" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: lowerCAmelCase_ :Optional[int] = True def __lowerCAmelCase ( self , __A ) -> Optional[Any]: lowerCAmelCase_ :str = self.config # find the config node of interest if it exists lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" ) lowerCAmelCase_ :List[str] = nodes.pop() for node in nodes: lowerCAmelCase_ :Tuple = config.get(__A ) if config is None: return None, ds_key return config, ds_key def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A ) if config is None: return default return config.get(__A , __A ) def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]: lowerCAmelCase_ :Tuple = self.config # find the config node of interest if it exists lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" ) for node in nodes: lowerCAmelCase_ :int = config lowerCAmelCase_ :Any = config.get(__A ) if config is None: if must_exist: raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(__A ) def __lowerCAmelCase ( self , __A ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = self.get_value(__A ) return False if value is None else bool(__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: lowerCAmelCase_ :List[str] = self.get_value(__A ) return False if value is None else not bool(__A ) def __lowerCAmelCase ( self ) -> str: return self._stage == 2 def __lowerCAmelCase ( self ) -> Union[str, Any]: return self._stage == 3 def __lowerCAmelCase ( self ) -> Union[str, Any]: return self._offload class _SCREAMING_SNAKE_CASE : def __init__( self , __A ) -> Optional[int]: lowerCAmelCase_ :Dict = engine def __lowerCAmelCase ( self 
, __A , **__A ) -> str: # runs backpropagation and handles mixed precision self.engine.backward(__A , **__A ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> List[str]: super().__init__(__A , device_placement=__A , scaler=__A ) lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" ) def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def __lowerCAmelCase ( self ) -> List[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def __lowerCAmelCase ( self ) -> int: if self.__has_overflow__: return self.optimizer.overflow return False class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A , __A ) -> Optional[int]: super().__init__(__A , __A ) def __lowerCAmelCase ( self ) -> Any: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]: lowerCAmelCase_ :str = params lowerCAmelCase_ :Any = lr lowerCAmelCase_ :List[Any] = weight_decay lowerCAmelCase_ :Any = kwargs class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]: lowerCAmelCase_ :Optional[int] = optimizer lowerCAmelCase_ :int = total_num_steps lowerCAmelCase_ :List[Any] = warmup_num_steps lowerCAmelCase_ :int = kwargs
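# Minimal usage sketch (the inline dict is an assumed toy config): dotted keys are
# resolved one node at a time, so nested DeepSpeed options can be read in one call.
#   ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#   ds_config.get_value("zero_optimization.stage")  # -> 3
#   ds_config.is_zero3(), ds_config.is_offload()    # -> True, True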
"""simple docstring""" import math import sys def _snake_case ( lowercase__ : str ) -> str: '''simple docstring''' lowerCAmelCase_ :Tuple = '' try: with open(_A , """rb""" ) as binary_file: lowerCAmelCase_ :Dict = binary_file.read() for dat in data: lowerCAmelCase_ :Any = f"""{dat:08b}""" result += curr_byte return result except OSError: print("""File not accessible""" ) sys.exit() def _snake_case ( lowercase__ : str ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = {'0': '0', '1': '1'} lowerCAmelCase_ :Optional[int] = '', '' lowerCAmelCase_ :Optional[Any] = len(_A ) for i in range(len(_A ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue lowerCAmelCase_ :Any = lexicon[curr_string] result += last_match_id lowerCAmelCase_ :Dict = last_match_id + '0' if math.loga(_A ).is_integer(): lowerCAmelCase_ :str = {} for curr_key in list(_A ): lowerCAmelCase_ :Any = lexicon.pop(_A ) lowerCAmelCase_ :Tuple = new_lex lowerCAmelCase_ :List[str] = last_match_id + '1' index += 1 lowerCAmelCase_ :Dict = '' return result def _snake_case ( lowercase__ : Dict , lowercase__ : int ) -> int: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 8 try: with open(_A , """wb""" ) as opened_file: lowerCAmelCase_ :Tuple = [ to_write[i : i + byte_length] for i in range(0 , len(_A ) , _A ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("""10000000""" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(_A , 2 ).to_bytes(1 , byteorder="""big""" ) ) except OSError: print("""File not accessible""" ) sys.exit() def _snake_case ( lowercase__ : Tuple ) -> Any: '''simple docstring''' lowerCAmelCase_ :Union[str, Any] = 0 for letter in data_bits: if letter == "1": break counter += 1 lowerCAmelCase_ :Optional[Any] = data_bits[counter:] lowerCAmelCase_ :str = data_bits[counter + 1 :] return data_bits def _snake_case ( lowercase__ : Tuple , lowercase__ : Dict ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[str] = read_file_binary(_A ) lowerCAmelCase_ :List[str] = remove_prefix(_A ) lowerCAmelCase_ :str = decompress_data(_A ) write_file_binary(_A , _A ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined" UpperCAmelCase_ :List[Any] = "image_segmenter" UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation UpperCAmelCase_ :Tuple = ["image", "text"] UpperCAmelCase_ :Dict = ["image"] def __init__( self , *__A , **__A ) -> Optional[Any]: requires_backends(self , ["""vision"""] ) super().__init__(*__A , **__A ) def __lowerCAmelCase ( self , __A , __A ) -> Any: return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" ) def __lowerCAmelCase ( self , __A ) -> Tuple: with torch.no_grad(): lowerCAmelCase_ :Dict = self.model(**__A ).logits return logits def __lowerCAmelCase ( self , __A ) -> Tuple: lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy() lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :str = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
"""simple docstring""" from __future__ import annotations import typing from collections.abc import Iterable import numpy as np __UpperCAmelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 __UpperCAmelCase = typing.Union[np.floataa, int, float] # noqa: UP007 def _snake_case ( lowercase__ : Vector , lowercase__ : Vector ) -> VectorOut: '''simple docstring''' return np.sqrt(np.sum((np.asarray(lowercase__ ) - np.asarray(lowercase__ )) ** 2 ) ) def _snake_case ( lowercase__ : Vector , lowercase__ : Vector ) -> VectorOut: '''simple docstring''' return sum((va - va) ** 2 for va, va in zip(lowercase__ , lowercase__ ) ) ** (1 / 2) if __name__ == "__main__": def _snake_case ( ) -> None: '''simple docstring''' from timeit import timeit print("""Without Numpy""" ) print( timeit( """euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=1_0_0_0_0 , globals=globals() , ) ) print("""With Numpy""" ) print( timeit( """euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=1_0_0_0_0 , globals=globals() , ) ) benchmark()
"""simple docstring""" def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int: '''simple docstring''' if index == number_of_items: return 0 lowerCAmelCase_ :Any = 0 lowerCAmelCase_ :str = 0 lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 ) if weights[index] <= max_weight: lowerCAmelCase_ :str = values[index] + knapsack( lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 ) return max(lowercase__ , lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : Dict=None ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :str = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: lowerCAmelCase_ :Any = True, True lowerCAmelCase_ :Optional[Any] = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) return path def _snake_case ( lowercase__ : List[Any] , lowercase__ : Dict ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Any = -1 for i in range(snake_case_ ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 lowerCAmelCase_ :int = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def _snake_case ( lowercase__ : int , lowercase__ : List[Any] ) -> List[str]: '''simple docstring''' lowerCAmelCase_ :int = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] lowerCAmelCase_ :int = check_circuit_or_path(snake_case_ , snake_case_ ) if check == 3: print("""graph is not Eulerian""" ) print("""no path""" ) return lowerCAmelCase_ :str = 1 if check == 2: lowerCAmelCase_ :Optional[Any] = odd_node print("""graph has a Euler path""" ) if check == 1: print("""graph has a Euler cycle""" ) lowerCAmelCase_ :Tuple = dfs(snake_case_ , snake_case_ , snake_case_ ) print(snake_case_ ) def _snake_case ( ) -> Any: '''simple docstring''' lowerCAmelCase_ :Optional[int] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} lowerCAmelCase_ :int = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} lowerCAmelCase_ :Union[str, Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} lowerCAmelCase_ :List[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]} lowerCAmelCase_ :int = { 1: [], 2: [] # all degree is zero } lowerCAmelCase_ :Any = 1_0 check_euler(snake_case_ , snake_case_ ) check_euler(snake_case_ , snake_case_ ) check_euler(snake_case_ , snake_case_ ) check_euler(snake_case_ , snake_case_ ) check_euler(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
"""simple docstring""" from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]: '''simple docstring''' if not is_tqdm_available(): raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" ) lowerCAmelCase_ :Tuple = False if main_process_only: lowerCAmelCase_ :Dict = PartialState().local_process_index == 0 return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ) -> str: lowerCAmelCase_ :Union[str, Any] = parent lowerCAmelCase_ :List[Any] = batch_size lowerCAmelCase_ :List[Any] = seq_length lowerCAmelCase_ :List[str] = is_training lowerCAmelCase_ :List[Any] = use_input_mask lowerCAmelCase_ :Any = use_token_type_ids lowerCAmelCase_ :Dict = use_labels lowerCAmelCase_ :str = vocab_size lowerCAmelCase_ :Tuple = hidden_size lowerCAmelCase_ :Any = num_hidden_layers lowerCAmelCase_ :Union[str, Any] = num_attention_heads lowerCAmelCase_ :Any = intermediate_size lowerCAmelCase_ :List[Any] = hidden_act lowerCAmelCase_ :str = hidden_dropout_prob lowerCAmelCase_ :Union[str, Any] = attention_probs_dropout_prob lowerCAmelCase_ :Any = max_position_embeddings lowerCAmelCase_ :Any = type_vocab_size lowerCAmelCase_ :Union[str, Any] = type_sequence_label_size lowerCAmelCase_ :Dict = initializer_range lowerCAmelCase_ :List[str] = num_labels lowerCAmelCase_ :List[Any] = num_choices lowerCAmelCase_ :int = scope def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ :List[Any] = None if self.use_input_mask: lowerCAmelCase_ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase_ :List[str] = None if self.use_token_type_ids: lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ :Optional[Any] = None if self.use_labels: lowerCAmelCase_ :int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ :Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ :Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> Any: return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self , __A , __A , __A , __A 
, __A , __A , __A ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = NystromformerModel(config=__A ) model.to(__A ) model.eval() lowerCAmelCase_ :Dict = model(__A , attention_mask=__A , token_type_ids=__A ) lowerCAmelCase_ :Optional[int] = model(__A , token_type_ids=__A ) lowerCAmelCase_ :Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[int]: lowerCAmelCase_ :Optional[int] = NystromformerForMaskedLM(config=__A ) model.to(__A ) model.eval() lowerCAmelCase_ :Tuple = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any: lowerCAmelCase_ :Tuple = NystromformerForQuestionAnswering(config=__A ) model.to(__A ) model.eval() lowerCAmelCase_ :Tuple = model( __A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = self.num_labels lowerCAmelCase_ :Optional[int] = NystromformerForSequenceClassification(__A ) model.to(__A ) model.eval() lowerCAmelCase_ :Optional[Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> str: lowerCAmelCase_ :Union[str, Any] = self.num_labels lowerCAmelCase_ :Union[str, Any] = NystromformerForTokenClassification(config=__A ) model.to(__A ) model.eval() lowerCAmelCase_ :Tuple = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> str: lowerCAmelCase_ :Optional[int] = self.num_choices lowerCAmelCase_ :Tuple = NystromformerForMultipleChoice(config=__A ) model.to(__A ) model.eval() lowerCAmelCase_ :Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ :Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ :Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase_ :int = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Optional[int] = self.prepare_config_and_inputs() ( lowerCAmelCase_ ) :Optional[int] = config_and_inputs lowerCAmelCase_ :int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): UpperCAmelCase_ :Tuple = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) UpperCAmelCase_ :Dict = ( { 
"""feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase_ :int = False UpperCAmelCase_ :Optional[int] = False def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Optional[Any] = NystromformerModelTester(self ) lowerCAmelCase_ :Any = ConfigTester(self , config_class=__A , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase_ :str = type self.model_tester.create_and_check_model(*__A ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__A ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__A ) def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__A ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__A ) @slow def __lowerCAmelCase ( self ) -> Dict: for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ :List[str] = NystromformerModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[int] = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) lowerCAmelCase_ :List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): lowerCAmelCase_ :Union[str, Any] = model(__A )[0] lowerCAmelCase_ :List[str] = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , __A ) lowerCAmelCase_ :Optional[Any] = torch.tensor( [[[-0.4_5_3_2, -0.0_9_3_6, 0.5_1_3_7], [-0.2_6_7_6, 0.0_6_2_8, 0.6_1_8_6], [-0.3_6_2_9, -0.1_7_2_6, 0.4_7_1_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :int = """the [MASK] of Belgium is Brussels""" lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) lowerCAmelCase_ :Union[str, Any] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) lowerCAmelCase_ :Optional[int] = tokenizer(__A , return_tensors="""pt""" ) with torch.no_grad(): lowerCAmelCase_ :Union[str, Any] = model(encoding.input_ids ).logits lowerCAmelCase_ :Union[str, Any] = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(__A ) , """capital""" )
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json') class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = 0 def __lowerCAmelCase ( self ) -> List[str]: self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" ) os.makedirs(__A , exist_ok=__A ) with open(os.path.join(__A , """config.json""" ) , """w""" ) as f: f.write(json.dumps({} ) ) lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertEqual(type(__A ) , __A ) def __lowerCAmelCase ( self ) -> Optional[int]: try: AutoConfig.register("""custom""" , __A ) # Wrong model type will raise an error with self.assertRaises(__A ): AutoConfig.register("""model""" , __A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoConfig.register("""bert""" , __A ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCAmelCase_ :Union[str, Any] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def __lowerCAmelCase ( self ) -> Tuple: with self.assertRaisesRegex( __A , """bert-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" ) def __lowerCAmelCase ( self ) -> Any: with self.assertRaisesRegex( __A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" ) def __lowerCAmelCase ( self ) -> int: with self.assertRaisesRegex( __A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ): lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def __lowerCAmelCase ( self ) -> 
Tuple: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__A ): lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__A ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A ) self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" ) def __lowerCAmelCase ( self ) -> int: class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :int = "new-model" try: AutoConfig.register("""new-model""" , __A ) # If remote code is not set, the default is to use local lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
"""Tests for Accelerate state checkpointing (save_state/load_state)."""

import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generate a pair of dummy DataLoaders over noisy samples of y = a*x + b."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Train for `num_epochs` epochs and return the random numbers drawn after each epoch."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    """Minimal model computing y = a*x + b with learnable a and b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with `total_limit=2` only the two newest survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
"""simple docstring""" import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :str = GPTSanJapaneseTokenizer UpperCAmelCase_ :Optional[int] = False UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False} def __lowerCAmelCase ( self ) -> Tuple: super().setUp() # fmt: off lowerCAmelCase_ :Dict = ["""ใ“ใ‚“""", """ใ“ใ‚“ใซ""", """ใซใกใฏ""", """ใฐใ‚“ใฏ""", """ไธ–็•Œ,ใ”บ็•Œ""", """ใ€""", """ใ€‚""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # ๐Ÿ˜€ lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""} lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(__A ) ) def __lowerCAmelCase ( self , **__A ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A ) def __lowerCAmelCase ( self , __A ) -> Dict: lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" return input_text, output_text def __lowerCAmelCase ( self , __A ) -> str: lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A ) lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A ) lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A ) return text, ids def __lowerCAmelCase ( self ) -> str: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> int: pass # TODO add if relevant def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚""" lowerCAmelCase_ :Any = ["""ใ“ใ‚“""", """ใซใกใฏ""", """ใ€""", """ไธ–็•Œ""", """ใ€‚""", """<SP>""", """ใ“ใ‚“""", """ใฐใ‚“ใฏ""", """ใ€""", """ใ”บ็•Œ""", """ใ€‚"""] lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids without special tokens lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids with special tokens lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token] lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( 
self ) -> Union[str, Any]: lowerCAmelCase_ :int = self.get_tokenizer() # Testing tokenization lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚""" lowerCAmelCase_ :str = """ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :str = tokenizer.encode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Any = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :Optional[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text ) lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A ) lowerCAmelCase_ :int = tokenizer.decode(__A ) lowerCAmelCase_ :Dict = tokenizer.decode(__A ) lowerCAmelCase_ :Tuple = tokenizer.decode(__A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization lowerCAmelCase_ :List[Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" lowerCAmelCase_ :Optional[int] = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2 lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1) lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0] lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1) lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) @slow def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""ใ‚ใƒณใ„ใƒฏ""" ) lowerCAmelCase_ :int = tokenizer.encode("""ใ„ใƒฏ""" , prefix_text="""ใ‚ใƒณ""" ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertNotEqual(__A , __A ) self.assertNotEqual(__A , __A ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) lowerCAmelCase_ :int = [["""ๆญฆ็”ฐไฟก็Ž„""", """ใฏใ€"""], ["""็น”็”ฐไฟก้•ท""", """ใฎ้…ไธ‹ใฎใ€"""]] lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A ) lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A ) # fmt: off lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]] lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], 
[1, 1, 1, 0, 0, 0, 0, 0]] lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , __A ) self.assertListEqual(x_token.token_type_ids , __A ) self.assertListEqual(x_token.attention_mask , __A ) self.assertListEqual(x_token_a.input_ids , __A ) self.assertListEqual(x_token_a.token_type_ids , __A ) self.assertListEqual(x_token_a.attention_mask , __A ) def __lowerCAmelCase ( self ) -> Tuple: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def __lowerCAmelCase ( self ) -> str: # tokenizer has no padding token pass
"""simple docstring""" def _snake_case ( lowercase__ : Optional[Any] = 4_0_0_0_0_0_0 ) -> str: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = [] lowerCAmelCase_ , lowerCAmelCase_ :Dict = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = b, a + b return sum(lowercase__ ) if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __UpperCAmelCase = pd.read_csv( 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/' 'position_salaries.csv' ) __UpperCAmelCase = dataset.iloc[:, 1:2].values __UpperCAmelCase = dataset.iloc[:, 2].values __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0) __UpperCAmelCase = PolynomialFeatures(degree=4) __UpperCAmelCase = poly_reg.fit_transform(X) __UpperCAmelCase = LinearRegression() pol_reg.fit(X_poly, y) def _snake_case ( ) -> str: '''simple docstring''' plt.scatter(lowercase__ , lowercase__ , color="""red""" ) plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase if `use_pascal`).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
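# --- Illustrative addition (not part of the original file above) ---
# A compact variant of the same conversion, shown as a design alternative that
# builds the parts list in one pass (name `snake_to_camel_case_alt` is ours):
def snake_to_camel_case_alt(input_str: str, use_pascal: bool = False) -> str:
    head, *tail = input_str.split("_")
    parts = [head[0].upper() + head[1:]] if use_pascal else [head]
    parts += [word[0].upper() + word[1:] for word in tail]
    return "".join(parts)


assert snake_to_camel_case_alt("foo_bar_baz") == "fooBarBaz"
assert snake_to_camel_case_alt("foo_bar_baz", use_pascal=True) == "FooBarBaz"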
"""simple docstring""" from __future__ import annotations __UpperCAmelCase = 1.6021e-19 # units = C def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]: '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif conductivity < 0: raise ValueError("""Conductivity cannot be negative""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative""" ) elif mobility < 0: raise ValueError("""mobility cannot be negative""" ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
"""Project Euler: number of lattice paths through an n x n grid, i.e. the
central binomial coefficient C(2n, n)."""

from math import factorial


def solution(n: int = 20) -> int:
    """Return C(2n, n), the number of routes through an n x n grid."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    # Integer floor division keeps the result exact for large n, where float
    # division of the factorials would lose precision.
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
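# --- Illustrative addition (not part of the original file above) ---
# Cross-check sketch: Python 3.8+ ships math.comb, which computes the same
# central binomial coefficient exactly.
from math import comb

assert solution(20) == comb(40, 20) == 137_846_528_820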
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *__A , **__A ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , __A , ) super().__init__(*__A , **__A )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = { 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['MobileViTFeatureExtractor'] __UpperCAmelCase = ['MobileViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame: '''simple docstring''' lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}""" lowerCAmelCase_ :List[str] = { """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", """Accept-Language""": """en-US, en;q=0.5""", } lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text ) # Initialize a Pandas dataframe with the column titles lowerCAmelCase_ :Union[str, Any] = DataFrame( columns=[ """Product Title""", """Product Link""", """Current Price of the product""", """Product Rating""", """MRP of the product""", """Discount""", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( """div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ): try: lowerCAmelCase_ :str = item.ha.text lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.ha.a["""href"""] lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text try: lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text except AttributeError: lowerCAmelCase_ :int = """Not available""" try: lowerCAmelCase_ :str = ( """โ‚น""" + item.find( """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""โ‚น""" )[1] ) except AttributeError: lowerCAmelCase_ :Optional[Any] = """""" try: lowerCAmelCase_ :str = float( ( ( float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) - float(product_price.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) / float(product_mrp.strip("""โ‚น""" ).replace(""",""" , """""" ) ) ) * 1_0_0 ) except ValueError: lowerCAmelCase_ :Union[str, Any] = float("""nan""" ) except AttributeError: pass lowerCAmelCase_ :Any = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] lowerCAmelCase_ :List[Any] = """ """ lowerCAmelCase_ :Tuple = """ """ data_frame.index += 1 return data_frame if __name__ == "__main__": __UpperCAmelCase = 'headphones' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class _SCREAMING_SNAKE_CASE ( __snake_case ): @staticmethod @abstractmethod def __lowerCAmelCase ( __A ) -> int: raise NotImplementedError() @abstractmethod def __lowerCAmelCase ( self ) -> Tuple: raise NotImplementedError()
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: lowerCAmelCase_ :Any = """laion/clap-htsat-unfused""" lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp() def __lowerCAmelCase ( self , **__A ) -> List[Any]: return RobertaTokenizer.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self , **__A ) -> Tuple: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A ) def __lowerCAmelCase ( self ) -> int: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Optional[Any] = self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 ) lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Dict = self.get_feature_extractor() lowerCAmelCase_ :str = self.get_tokenizer() lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) ) lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" ) lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :List[Any] = """This is a test string""" lowerCAmelCase_ :Dict = processor(text=__A ) lowerCAmelCase_ :List[str] = tokenizer(__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :int = self.get_feature_extractor() lowerCAmelCase_ :Tuple = 
self.get_tokenizer() lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase_ :Tuple = processor.batch_decode(__A ) lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor() lowerCAmelCase_ :Any = self.get_tokenizer() lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
"""simple docstring""" import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> int: '''simple docstring''' lowerCAmelCase_ :List[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : Dict ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :Optional[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase_ :List[Any] = datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :Dict = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase_ :int = 1_6 elif accelerator.mixed_precision != "no": lowerCAmelCase_ :List[str] = 8 else: lowerCAmelCase_ :str = None return tokenizer.pad( UpperCamelCase__ , padding="""longest""" , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCAmelCase_ :List[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ , drop_last=UpperCamelCase__ ) lowerCAmelCase_ :Dict = DataLoader( tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ , drop_last=(accelerator.mixed_precision == """fp8""") , ) return train_dataloader, eval_dataloader def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :List[Any] = config["""lr"""] lowerCAmelCase_ :List[Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :List[Any] = int(config["""seed"""] ) lowerCAmelCase_ :int = int(config["""batch_size"""] ) lowerCAmelCase_ :Any = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation lowerCAmelCase_ :int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCAmelCase_ :Dict = batch_size // MAX_GPU_BATCH_SIZE lowerCAmelCase_ :Dict = MAX_GPU_BATCH_SIZE set_seed(UpperCamelCase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase_ :Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase_ :Any = AdamW(params=model.parameters() , lr=UpperCamelCase__ ) # Instantiate scheduler lowerCAmelCase_ :Tuple = get_linear_schedule_with_warmup( optimizer=UpperCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Now we train the model for epoch in range(UpperCamelCase__ ): model.train() for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCAmelCase_ :Optional[int] = model(**UpperCamelCase__ ) lowerCAmelCase_ :Any = outputs.loss lowerCAmelCase_ :Optional[Any] = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :str = model(**UpperCamelCase__ ) lowerCAmelCase_ :int = outputs.logits.argmax(dim=-1 ) lowerCAmelCase_ , lowerCAmelCase_ :Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=UpperCamelCase__ , references=UpperCamelCase__ , ) lowerCAmelCase_ :Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , UpperCamelCase__ ) def _snake_case ( ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowerCAmelCase_ :Any = parser.parse_args() lowerCAmelCase_ :Optional[int] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
"""simple docstring""" import os from math import logaa def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int: '''simple docstring''' lowerCAmelCase_ :float = 0 lowerCAmelCase_ :Union[str, Any] = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ): lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) ) if x * logaa(lowercase__ ) > largest: lowerCAmelCase_ :Any = x * logaa(lowercase__ ) lowerCAmelCase_ :List[Any] = i + 1 return result if __name__ == "__main__": print(solution())
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''microsoft/swin-tiny-patch4-window7-224''': ( '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json''' ), # See all Swin models at https://huggingface.co/models?filter=swin } class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ :Union[str, Any] = 'swin' UpperCAmelCase_ :Optional[Any] = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , __A=224 , __A=4 , __A=3 , __A=96 , __A=[2, 2, 6, 2] , __A=[3, 6, 12, 24] , __A=7 , __A=4.0 , __A=True , __A=0.0 , __A=0.0 , __A=0.1 , __A="gelu" , __A=False , __A=0.0_2 , __A=1E-5 , __A=32 , __A=None , __A=None , **__A , ) -> Dict: super().__init__(**_SCREAMING_SNAKE_CASE ) lowerCAmelCase_ :Any = image_size lowerCAmelCase_ :Any = patch_size lowerCAmelCase_ :Union[str, Any] = num_channels lowerCAmelCase_ :int = embed_dim lowerCAmelCase_ :Any = depths lowerCAmelCase_ :Tuple = len(_SCREAMING_SNAKE_CASE ) lowerCAmelCase_ :Optional[Any] = num_heads lowerCAmelCase_ :List[str] = window_size lowerCAmelCase_ :int = mlp_ratio lowerCAmelCase_ :Any = qkv_bias lowerCAmelCase_ :int = hidden_dropout_prob lowerCAmelCase_ :Any = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = drop_path_rate lowerCAmelCase_ :Tuple = hidden_act lowerCAmelCase_ :Any = use_absolute_embeddings lowerCAmelCase_ :Optional[Any] = layer_norm_eps lowerCAmelCase_ :Tuple = initializer_range lowerCAmelCase_ :Any = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase_ :Optional[int] = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE ) - 1) ) lowerCAmelCase_ :Dict = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(_SCREAMING_SNAKE_CASE ) + 1 )] lowerCAmelCase_ :List[str] = get_aligned_output_features_output_indices( out_features=_SCREAMING_SNAKE_CASE , out_indices=_SCREAMING_SNAKE_CASE , stage_names=self.stage_names ) class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ :List[Any] = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
"""simple docstring""" import itertools import math def _snake_case ( lowercase__ : int ) -> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = 2 while True: if is_prime(lowercase__ ): yield num num += 1 def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> int: '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , lowercase__ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _SCREAMING_SNAKE_CASE ( snake_case__ ): UpperCAmelCase_ :Union[str, Any] = ["image_processor", "tokenizer"] UpperCAmelCase_ :str = "BridgeTowerImageProcessor" UpperCAmelCase_ :Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , __A , __A ) -> int: super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self , __A , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ) -> List[Any]: lowerCAmelCase_ :List[Any] = self.tokenizer( text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , ) # add pixel_values + pixel_mask lowerCAmelCase_ :Optional[Any] = self.image_processor( UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_center_crop=UpperCAmelCase_ , **UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def __lowerCAmelCase ( self , *__A , **__A ) -> Tuple: return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def __lowerCAmelCase ( self , *__A , **__A ) -> List[str]: return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Optional[int] = self.tokenizer.model_input_names lowerCAmelCase_ :int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" def _snake_case ( lowercase__ : int = 5_0 ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"""{solution() = }""")
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _SCREAMING_SNAKE_CASE ( ctypes.Structure ): UpperCAmelCase_ :List[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)] def _snake_case ( ) -> str: '''simple docstring''' if os.name == "nt": lowerCAmelCase_ :Union[str, Any] = CursorInfo() lowerCAmelCase_ :Dict = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) ) lowerCAmelCase_ :List[Any] = False ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) ) elif os.name == "posix": sys.stdout.write("""\033[?25l""" ) sys.stdout.flush() def _snake_case ( ) -> Optional[int]: '''simple docstring''' if os.name == "nt": lowerCAmelCase_ :Dict = CursorInfo() lowerCAmelCase_ :Optional[Any] = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) ) lowerCAmelCase_ :Tuple = True ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE_ , ctypes.byref(SCREAMING_SNAKE_CASE_ ) ) elif os.name == "posix": sys.stdout.write("""\033[?25h""" ) sys.stdout.flush() @contextmanager def _snake_case ( ) -> str: '''simple docstring''' try: hide_cursor() yield finally: show_cursor()
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCAmelCase_ :List[Any] = CLIPTextModel(__A ) lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Union[str, Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Tuple = torch.manual_seed(__A ) else: lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A ) 
lowerCAmelCase_ :List[Any] = 2 lowerCAmelCase_ :int = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ) lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :Union[str, Any] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> int: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__A ): if isinstance(__A , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase_ :List[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__A ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) 
lowerCAmelCase_ :str = CLIPTextModel(__A ) lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase_ :List[Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __lowerCAmelCase ( self , __A , __A=0 ) -> str: if str(__A ).startswith("""mps""" ): lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A ) else: lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A ) lowerCAmelCase_ :Optional[Any] = 2 lowerCAmelCase_ :Optional[int] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ), ] lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A ) lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) ) lowerCAmelCase_ :List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :List[str] = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) lowerCAmelCase_ :Union[str, Any] = 1_0.0 lowerCAmelCase_ :Union[str, Any] = 4 lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A ) lowerCAmelCase_ :List[str] = steps lowerCAmelCase_ :int = scale lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0] lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = steps lowerCAmelCase_ :str = scale lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = steps lowerCAmelCase_ :Union[str, Any] = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = steps lowerCAmelCase_ :Tuple = scale lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def __lowerCAmelCase ( self ) -> Dict: return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCAmelCase ( self ) -> Tuple: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> Optional[int]: self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :str = self.get_dummy_components() 
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__A ) except NotImplementedError: pass @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowerCAmelCase_ :List[Any] = """evil space-punk bird""" lowerCAmelCase_ :List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) lowerCAmelCase_ :int = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) lowerCAmelCase_ :Union[str, Any] = pipe( __A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) lowerCAmelCase_ :Tuple = output.images[0] assert image.shape == (512, 512, 3) lowerCAmelCase_ :Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _SCREAMING_SNAKE_CASE ( UpperCamelCase__ ): def __lowerCAmelCase ( self , __A ) -> List[str]: with open(__lowerCamelCase , encoding="""utf-8""" ) as input_file: lowerCAmelCase_ :int = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" ) lowerCAmelCase_ :Union[str, Any] = input_file.read() lowerCAmelCase_ :Optional[int] = regexp.search(__lowerCamelCase ) return match def __lowerCAmelCase ( self , __A ) -> str: with open(__lowerCamelCase , encoding="""utf-8""" ) as input_file: lowerCAmelCase_ :Any = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL ) lowerCAmelCase_ :Union[str, Any] = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` lowerCAmelCase_ :Optional[Any] = regexp.finditer(__lowerCamelCase ) lowerCAmelCase_ :Optional[int] = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Optional[int] = Path("""./datasets""" ) lowerCAmelCase_ :Tuple = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowerCamelCase ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :str = Path("""./datasets""" ) lowerCAmelCase_ :int = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowerCamelCase ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ): UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]: super().__init__() lowerCAmelCase_ :List[str] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" f""" `n_embd`: {n_embd} are not equal.""" ) lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim lowerCAmelCase_ :str = prefix_hidden_dim lowerCAmelCase_ :str = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :List[Any] = ( nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity() ) lowerCAmelCase_ :Any = GPTaConfig( vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , ) lowerCAmelCase_ :Any = GPTaLMHeadModel(__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]: lowerCAmelCase_ :str = self.transformer.transformer.wte(__A ) lowerCAmelCase_ :Any = self.encode_prefix(__A ) lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A ) lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor: return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A ) def __lowerCAmelCase ( self , __A ) -> Optional[int]: return self.encode_prefix(__A ) @torch.no_grad() def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]: lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 ) lowerCAmelCase_ :Optional[int] = [] lowerCAmelCase_ :List[str] = [] for feature in features: lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature # Only support beam search for now lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam( input_embeds=__A , device=__A , eos_token_id=__A ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) lowerCAmelCase_ :Tuple = torch.stack(__A ) lowerCAmelCase_ :int = torch.stack(__A ) return generated_tokens, generated_seq_lengths @torch.no_grad() def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, 
Any]: lowerCAmelCase_ :Optional[int] = eos_token_id lowerCAmelCase_ :Optional[int] = None lowerCAmelCase_ :Any = None lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int ) lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool ) if input_embeds is not None: lowerCAmelCase_ :List[str] = input_embeds else: lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A ) for i in range(__A ): lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A ) lowerCAmelCase_ :str = outputs.logits lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) lowerCAmelCase_ :Dict = logits.softmax(-1 ).log() if scores is None: lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 ) lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] ) lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: lowerCAmelCase_ :List[str] = next_tokens else: lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] ) lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 ) else: lowerCAmelCase_ :List[Any] = -float(np.inf ) lowerCAmelCase_ :int = 0 lowerCAmelCase_ :Optional[int] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None] lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 ) lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1] lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source] lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1] lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 ) lowerCAmelCase_ :str = tokens[next_tokens_source] lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 ) lowerCAmelCase_ :Dict = generated[next_tokens_source] lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source] lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 ) lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze() if is_stopped.all(): break lowerCAmelCase_ :str = scores / seq_lengths lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A ) # tokens tensors are already padded to max_seq_length lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order] lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 ) lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
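# A small shape-check sketch for the decoder above (the dimensions are
# arbitrary illustrative choices, not the UniDiffuser defaults; assumes the
# module's relative imports resolve inside an installed diffusers tree).
decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
prefix_embeds = torch.randn(2, 77, 768)        # (batch, prefix_length, prefix_inner_dim)
input_ids = torch.randint(0, 50257, (2, 10))   # (batch, text_length)
out = decoder(input_ids, prefix_embeds)
print(out.logits.shape)                        # torch.Size([2, 87, 50257]) -- prefix + text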
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :List[Any] = [False] * len(__lowerCamelCase ) lowerCAmelCase_ :Optional[int] = [] queue.append(__lowerCamelCase ) lowerCAmelCase_ :Dict = True while queue: lowerCAmelCase_ :str = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__lowerCamelCase ) lowerCAmelCase_ :List[Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] , lowercase__ : Dict ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Optional[int] = [-1] * (len(__lowerCamelCase )) lowerCAmelCase_ :Optional[int] = 0 while bfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Optional[Any] = min(__lowerCamelCase , graph[parent[s]][s] ) lowerCAmelCase_ :int = parent[s] max_flow += path_flow lowerCAmelCase_ :List[Any] = sink while v != source: lowerCAmelCase_ :Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "detr" UpperCAmelCase_ :str = ["past_key_values"] UpperCAmelCase_ :Tuple = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(__A , __A ): lowerCAmelCase_ :str = backbone_config.get("""model_type""" ) lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A ) # set timm attributes to None lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None lowerCAmelCase_ :Tuple = use_timm_backbone lowerCAmelCase_ :Optional[int] = backbone_config lowerCAmelCase_ :Optional[int] = num_channels lowerCAmelCase_ :int = num_queries lowerCAmelCase_ :List[Any] = d_model lowerCAmelCase_ :Optional[int] = encoder_ffn_dim lowerCAmelCase_ :Tuple = encoder_layers lowerCAmelCase_ :int = encoder_attention_heads lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim lowerCAmelCase_ :List[str] = decoder_layers lowerCAmelCase_ :Dict = decoder_attention_heads lowerCAmelCase_ :Dict = dropout lowerCAmelCase_ :Tuple = attention_dropout lowerCAmelCase_ :Union[str, Any] = activation_dropout lowerCAmelCase_ :Any = activation_function lowerCAmelCase_ :List[str] = init_std lowerCAmelCase_ :Optional[int] = init_xavier_std lowerCAmelCase_ :int = encoder_layerdrop lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop lowerCAmelCase_ :List[str] = encoder_layers lowerCAmelCase_ :Union[str, Any] = auxiliary_loss lowerCAmelCase_ :str = position_embedding_type lowerCAmelCase_ :List[Any] = backbone lowerCAmelCase_ :str = use_pretrained_backbone lowerCAmelCase_ :str = dilation # Hungarian matcher lowerCAmelCase_ :List[Any] = class_cost lowerCAmelCase_ :Union[str, Any] = bbox_cost lowerCAmelCase_ :Tuple = giou_cost # Loss coefficients lowerCAmelCase_ :Optional[int] = mask_loss_coefficient lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient lowerCAmelCase_ :Tuple = bbox_loss_coefficient lowerCAmelCase_ :Tuple = giou_loss_coefficient lowerCAmelCase_ :Dict = eos_coefficient super().__init__(is_encoder_decoder=__A , **__A ) @property def __lowerCAmelCase ( self ) -> int: return self.encoder_attention_heads @property def __lowerCAmelCase ( self ) -> int: return self.d_model @classmethod 
def __lowerCAmelCase ( cls , __A , **__A ) -> Any: return cls(backbone_config=__A , **__A ) def __lowerCAmelCase ( self ) -> Dict[str, any]: lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowerCAmelCase_ :Dict = self.backbone_config.to_dict() lowerCAmelCase_ :str = self.__class__.model_type return output class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :List[Any] = version.parse("1.11" ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-5 @property def __lowerCAmelCase ( self ) -> int: return 12
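# A short usage sketch (illustrative, not part of the library source): the
# attribute map exposes the encoder settings under generic transformer names.
config = DetrConfig()
assert config.model_type == "detr"
assert config.num_attention_heads == 8   # -> encoder_attention_heads
assert config.hidden_size == 256         # -> d_model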