"""Kandinsky 2.2 text-to-image decoder pipeline (diffusers-style)."""

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    randn_tensor,
    replace_example_docstring,
)


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    """Map a pixel-space size to the latent size the MoVQ autoencoder expects,
    rounding partial latent cells up."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22Pipeline(DiffusionPipeline):
    """Decoder pipeline: turns CLIP image embeddings (from the prior) into images.

    Args:
        unet: conditional U-Net that denoises the image latents.
        scheduler: DDPM scheduler used together with ``unet``.
        movq: MoVQ autoencoder that decodes latents to images.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via accelerate, moving each submodule to the
        GPU only when its forward pass runs. Saves memory at some speed cost."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU, moving one model at a time to the GPU.
        Faster than sequential offload, at a higher memory cost."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator=None,
        latents=None,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
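# Illustrative spot checks (not part of the original file): with the MoVQ
# scale factor of 8 registered above, `downscale_height_and_width` maps a
# 768x768 pixel request to a 96x96 latent grid (768 // 8**2 = 12, 12 * 8 = 96)
# and rounds non-multiples up to the next latent cell:
#
#   downscale_height_and_width(768, 768, scale_factor=8)  # -> (96, 96)
#   downscale_height_and_width(760, 760, scale_factor=8)  # -> (96, 96), rounded up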
"""Packaged `datasets` builder that reads Parquet files into Arrow tables."""

import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
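# Illustrative usage sketch (not part of the builder file): this builder is
# what `load_dataset` dispatches to for the packaged "parquet" module; extra
# keyword arguments are forwarded into ParquetConfig.
#
#   from datasets import load_dataset
#   ds = load_dataset(
#       "parquet",
#       data_files={"train": "data/train.parquet"},  # hypothetical path
#       columns=["text", "label"],                   # -> ParquetConfig.columns
#   )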
"""WikiSplit metric: the combination of SARI, exact match and SacreBLEU."""

import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
    title = {Optimizing Statistical Machine Translation for Text Simplification},
    authors = {Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {4},
    year = {2016},
    url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates the SARI score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU
score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score

Examples:
    >>> sources = ["About 95 species are currently accepted ."]
    >>> predictions = ["About 95 you now get in ."]
    >>> references = [["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def _build_ngrams(tokens):
    """Return (unigrams, bigrams, trigrams, 4-grams) for a list of tokens,
    each n-gram joined with single spaces."""
    grams2, grams3, grams4 = [], [], []
    for i in range(0, len(tokens) - 1):
        if i < len(tokens) - 1:
            grams2.append(" ".join(tokens[i : i + 2]))
        if i < len(tokens) - 2:
            grams3.append(" ".join(tokens[i : i + 3]))
        if i < len(tokens) - 3:
            grams4.append(" ".join(tokens[i : i + 4]))
    return tokens, grams2, grams3, grams4


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams, s2grams, s3grams, s4grams = _build_ngrams(ssent.split(" "))
    c1grams, c2grams, c3grams, c4grams = _build_ngrams(csent.split(" "))

    r1gramslist, r2gramslist, r3gramslist, r4gramslist = [], [], [], []
    for rsent in rsents:
        r1grams, r2grams, r3grams, r4grams = _build_ngrams(rsent.split(" "))
        r1gramslist.append(r1grams)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    keep1score, del1score, add1score = SARIngram(s1grams, c1grams, r1gramslist, numref)
    keep2score, del2score, add2score = SARIngram(s2grams, c2grams, r2gramslist, numref)
    keep3score, del3score, add3score = SARIngram(s3grams, c3grams, r3gramslist, numref)
    keep4score, del4score, add4score = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required to allow splitting the sentence on spaces.
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
"""ConvBERT model configuration (transformers-style)."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
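# Illustrative usage (not part of the file): the defaults above mirror the
# YituTech/conv-bert-base checkpoint; any field can be overridden.
#
#   config = ConvBertConfig(conv_kernel_size=7, head_ratio=2)
#   onnx_config = ConvBertOnnxConfig.from_model_config(config)
#   onnx_config.inputs  # OrderedDict of dynamic ONNX input axes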
"""Lazy import structure for the BERT model family (transformers-style)."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
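# Illustrative note (not part of the file): with _LazyModule in place,
# importing the package is cheap; a heavy backend is imported only when one
# of its attributes is first touched.
#
#   from transformers.models.bert import BertConfig  # no torch/tf/flax import yet
#   from transformers.models.bert import BertModel   # first touch pulls in torch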
"""Histogram (constant) stretching for grayscale images (OpenCV + matplotlib)."""

import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        """Histogram-stretch the grayscale image at `input_file` and write the result."""
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        # remap every pixel through the stretched lookup table
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()


"""Project Euler problem 2: sum of even-valued Fibonacci terms up to a bound."""


def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed `n`."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
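# Illustrative spot checks (not part of the original): even Fibonacci values
# are every third term (2, 8, 34, ...), so the loop above runs O(log n) times.
#
#   solution(10)  # -> 10  (2 + 8)
#   solution(34)  # -> 44  (2 + 8 + 34)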
"""Rotate a grayscale image with affine transforms (OpenCV + matplotlib)."""

from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img, pts1, pts2, rows, cols):
    """Warp `img` with the affine map that sends the three points in `pts1`
    to the three points in `pts2`."""
    rotation_matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts1, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts1, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, rotated in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(rotated, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
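# Illustrative note (not part of the original): cv2.getAffineTransform solves
# the 2x3 matrix M that maps three source points to three destination points,
# and cv2.warpAffine then applies [x', y'] = M @ [x, y, 1] to every pixel.
#
#   M = cv2.getAffineTransform(pts1, pts2)
#   M.shape  # -> (2, 3)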
"""Tests for the DDPM scheduler (diffusers-style)."""

import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
):\r\n\t\t\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(num_inference_steps=lowercase , timesteps=lowercase )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef \t\t\t\t\tA_ ( self ):\r\n\t\t\t\t\t\t\t_lowerCamelCase :\t\t\tOptional[int] = self.scheduler_classes[0]\r\n\t\t\t\t\t\t\t_lowerCamelCase :\t\t\tint = self.get_scheduler_config()\r\n\t\t\t\t\t\t\t_lowerCamelCase :\t\t\tList[Any] = scheduler_class(**lowercase )\r\n\r\n\t\t\t\t\t\t\t_lowerCamelCase :\t\t\tint = [scheduler.config.num_train_timesteps]\r\n\r\n\t\t\t\t\t\t\twith self.assertRaises(\r\n\t\t\t\t\t\t\t lowercase , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):\r\n\t\t\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(timesteps=lowercase )"},"code_codestyle":{"kind":"number","value":96,"string":"96"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available\r\n\r\n\r\nlowercase__ \t\t\t\t\t\t=\t\t\t\t\t\t{\"\"\"configuration_vit_msn\"\"\": [\"\"\"VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP\"\"\", \"\"\"ViTMSNConfig\"\"\"]}\r\n\r\ntry:\r\n\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\t\t\tpass\r\nelse:\r\n\t\t\tlowercase__ \t\t\t\t\t\t=\t\t\t\t\t\t[\r\n\t\t\t \"\"\"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST\"\"\",\r\n\t\t\t \"\"\"ViTMSNModel\"\"\",\r\n\t\t\t \"\"\"ViTMSNForImageClassification\"\"\",\r\n\t\t\t \"\"\"ViTMSNPreTrainedModel\"\"\",\r\n\t\t\t]\r\n\r\nif TYPE_CHECKING:\r\n\t\t\tfrom .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig\r\n\r\n\t\t\ttry:\r\n\t\t\t\t\t\tif not is_torch_available():\r\n\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\n\t\t\texcept OptionalDependencyNotAvailable:\r\n\t\t\t\t\t\tpass\r\n\t\t\telse:\r\n\t\t\t\t\t\tfrom .modeling_vit_msn import (\r\n\t\t\t\t\t\t VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t\t\t\t\t ViTMSNForImageClassification,\r\n\t\t\t\t\t\t ViTMSNModel,\r\n\t\t\t\t\t\t ViTMSNPreTrainedModel,\r\n\t\t\t\t\t\t)\r\n\r\nelse:\r\n\t\t\timport sys\r\n\r\n\t\t\tlowercase__ \t\t\t\t\t\t=\t\t\t\t\t\t_LazyModule(__name__, globals()[\"\"\"__file__\"\"\"], _import_structure, module_spec=__spec__)"},"style_context_codestyle":{"kind":"number","value":96,"string":"96"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":813,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline\r\nfrom diffusers.utils.testing_utils import (\r\n is_onnx_available,\r\n load_image,\r\n nightly,\r\n require_onnxruntime,\r\n require_torch_gpu,\r\n)\r\n\r\nfrom ..test_pipelines_onnx_common import OnnxPipelineTesterMixin\r\n\r\n\r\nif is_onnx_available():\r\n\timport onnxruntime as ort\r\n\r\n\r\n\r\nclass A__ ( __SCREAMING_SNAKE_CASE ,\t\tunittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\tpass\r\n\r\n\r\n\r\n\r\n\r\n\r\n@nightly\r\n@require_onnxruntime\r\n@require_torch_gpu\r\nclass A__ ( unittest.TestCase\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\t@property\r\n\tdef \t\t\t\t\t\t\t_SCREAMING_SNAKE_CASE (\t\t\t\t\tself:\t\tstr) ->\tint:\r\n\r\n\t\t\t\t\"\"\"simple 
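# ----- illustrative sketch (not part of the original sources) -----
# A minimal, runnable version of the denoising loop that the full-loop tests
# in DDPMSchedulerTest above exercise. The zero "residual" is a stand-in for
# a trained UNet; any model(sample, t) output slots in the same way.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
sample = torch.randn(1, 3, 32, 32)  # a noisy sample to denoise
generator = torch.manual_seed(0)

for t in scheduler.timesteps:  # descending timesteps, 999 ... 0
    residual = torch.zeros_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample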
# ----- diffusers: ONNX Stable Diffusion inpainting tests -----

import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


# ----- datasets: command-line entry point -----

from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()


# ----- modular arithmetic: modular inverse via the extended Euclidean algorithm -----

def gcd(a, b):
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a, m):
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
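# ----- illustrative sketch (not part of the original sources) -----
# Worked example for find_mod_inverse above: the inverse of 7 modulo 26 is
# 15, because 7 * 15 = 105 = 4 * 26 + 1.
inverse = find_mod_inverse(7, 26)
assert (7 * inverse) % 26 == 1
print(inverse)  # 15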
# ----- transformers: LayoutLMv2 lazy __init__ -----

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
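# ----- illustrative sketch (not part of the original sources) -----
# What the lazy __init__ above buys the caller: importing transformers stays
# cheap, and the torch-backed LayoutLMv2 classes materialize only on first
# attribute access (assumes transformers with torch installed).
from transformers import LayoutLMv2Config

config = LayoutLMv2Config()  # default config; no weights are downloaded
print(config.hidden_size)    # 768 by default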
# ----- transformers: UperNet model tests -----

import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))


# ----- utility script: minify text files with fire -----

from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir into dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
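# ----- illustrative sketch (not part of the original sources) -----
# Driving the fire-based minify helper above directly from Python; the two
# directory names are made up. The equivalent CLI form would be
# `python minify.py ./data ./data_mini 100`.
minify("./data", "./data_mini", 100)  # keeps the first 100 lines of each file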
# ----- transformers: Funnel Transformer configuration -----

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}


class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")


# ----- datasets: parallel backend (joblib-spark) tests -----

import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
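# ----- illustrative sketch (not part of the original sources) -----
# The API surface the spark-backend tests above exercise, outside pytest
# (assumes datasets plus joblibspark are installed and Spark is reachable).
from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested


def increment(i):
    return i + 1


with parallel_backend("spark"):
    print(map_nested(increment, {"a": [1, 2], "b": [3, 4]}, num_proc=2))
    # -> {"a": [2, 3], "b": [4, 5]}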
docstring'''\r\r\r\r\r\r\r\t\t\tdef __init__(\t\t\t\tself\t:\t\t\t\t\t\tList[Any]\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tstr = \"cpu\"\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tstr = \"openai/clip-vit-large-patch14\"\t\t\t) ->\t\tNone:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Dict =device\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[Any] =CLIPTokenizerFast.from_pretrained(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[Any] =[0.48145466, 0.4578275, 0.40821073]\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[int] =[0.26862954, 0.26130258, 0.27577711]\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Union[str, Any] =torchvision.transforms.Normalize(self.image_mean\t\t, self.image_std\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Dict =torchvision.transforms.Resize(2_24\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[Any] =torchvision.transforms.CenterCrop(2_24\t\t\t)\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tDict\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tstr\t\t\t) ->\t\tint:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Union[str, Any] =self.resize(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Union[str, Any] =self.center_crop(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[Any] =self.normalize(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\treturn images\r\r\r\r\t\t\tdef __call__(\t\t\t\tself\t:\t\t\t\t\t\tDict\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[str]=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[Any]=None\t\t, **lowerCAmelCase_\t:\t\t\t\t\t\tList[str]\t\t\t) ->\t\tOptional[Any]:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: int =self.tokenizer(text=lowerCAmelCase_\t\t, **lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[int] =self.preprocess_img(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[str] ={key: value.to(self.device\t\t\t) for (key, value) in encoding.items()}\r\t\t\t\t\t\t\t\t\treturn encoding\r\r\r\rclass lowerCamelCase\t\t\t( nn.Module\t\t\t\t\t\t):\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\r\r\t\t\tdef __init__(\t\t\t\tself\t:\t\t\t\t\t\tUnion[str, Any]\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tint=10\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tDict=0.01\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[str]=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tOptional[Any]=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tDict=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tTuple=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tTuple=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[str]=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tDict=False\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[Any]=True\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tOptional[Any]=\"image\"\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tOptional[Any]=True\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tTuple=False\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tint=False\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[str]=False\t\t, ) ->\t\tNone:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tsuper().__init__()\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[Any] =None\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Any =device if device else get_device()\r\t\t\t\t\t\t\t\t\tif vqgan:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Union[str, Any] =vqgan\r\t\t\t\t\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: str =load_vqgan(self.device\t\t, conf_path=lowerCAmelCase_\t\t, ckpt_path=lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tself.vqgan.eval()\r\t\t\t\t\t\t\t\t\tif clip:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[int] 
=clip\r\t\t\t\t\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Dict =CLIPModel.from_pretrained(\"\"\"openai/clip-vit-base-patch32\"\"\"\t\t\t)\r\t\t\t\t\t\t\t\t\tself.clip.to(self.device\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: str =ProcessorGradientFlow(device=self.device\t\t\t)\r\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Union[str, Any] =iterations\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: str =lr\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[str] =log\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Any =make_grid\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Dict =return_val\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: str =quantize\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: str =self.vqgan.decoder.z_shape\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tTuple\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tint=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[str]=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tOptional[int]=5\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[str]=True\t\t\t) ->\t\tAny:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[Any] =[]\r\t\t\t\t\t\t\t\t\tif output_path is None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: str =\"\"\"./animation.gif\"\"\"\r\t\t\t\t\t\t\t\t\tif input_path is None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[Any] =self.save_path\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Any =sorted(glob(input_path + \"\"\"/*\"\"\"\t\t\t)\t\t\t)\r\t\t\t\t\t\t\t\t\tif not len(lowerCAmelCase_\t\t\t):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"No images found in save path, aborting (did you pass save_intermediate=True to the generate\"\"\"\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"\"\" function?)\"\"\"\t\t\t)\r\t\t\t\t\t\t\t\t\tif len(lowerCAmelCase_\t\t\t) == 1:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tprint(\"\"\"Only one image found in save path, (did you pass save_intermediate=True to the generate function?)\"\"\"\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Tuple =total_duration / len(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Union[str, Any] =[frame_duration] * len(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tif extend_frames:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Any =1.5\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Tuple =3\r\t\t\t\t\t\t\t\t\tfor file_name in paths:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif file_name.endswith(\"\"\".png\"\"\"\t\t\t):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\timages.append(imageio.imread(lowerCAmelCase_\t\t\t)\t\t\t)\r\t\t\t\t\t\t\t\t\timageio.mimsave(lowerCAmelCase_\t\t, lowerCAmelCase_\t\t, duration=lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tprint(f\"gif saved to {output_path}\"\t\t\t)\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tTuple\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[str]=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tint=None\t\t\t) ->\t\tList[Any]:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tif not (path or img):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"Input either path or tensor\"\"\"\t\t\t)\r\t\t\t\t\t\t\t\t\tif img is not None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise NotImplementedError\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[Any] =preprocess(Image.open(lowerCAmelCase_\t\t\t)\t\t, target_image_size=2_56\t\t\t).to(self.device\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Tuple =preprocess_vqgan(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tA__ ,\t\t\t\t\t\t*A__\t\t\t\t\t: Dict =self.vqgan.encode(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\treturn z\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tAny\t\t, 
lowerCAmelCase_\t:\t\t\t\t\t\tOptional[int]\t\t\t) ->\t\tint:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Union[str, Any] =self.latent.detach().requires_grad_()\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[str] =base_latent + transform_vector\r\t\t\t\t\t\t\t\t\tif self.quantize:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__ ,\t\t\t\t\t\t*A__\t\t\t\t\t: List[Any] =self.vqgan.quantize(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: int =trans_latent\r\t\t\t\t\t\t\t\t\treturn self.vqgan.decode(lowerCAmelCase_\t\t\t)\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tint\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tDict\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[Any]\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tint=None\t\t\t) ->\t\tUnion[str, Any]:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: int =self.clip_preprocessor(text=lowerCAmelCase_\t\t, images=lowerCAmelCase_\t\t, return_tensors=\"\"\"pt\"\"\"\t\t, padding=lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: int =self.clip(**lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[Any] =clip_outputs.logits_per_image\r\t\t\t\t\t\t\t\t\tif weights is not None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Dict =similarity_logits * weights\r\t\t\t\t\t\t\t\t\treturn similarity_logits.sum()\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tList[str]\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tDict\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tAny\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tAny\t\t\t) ->\t\tTuple:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Dict =self._get_clip_similarity(pos_prompts[\"\"\"prompts\"\"\"]\t\t, lowerCAmelCase_\t\t, weights=(1 / pos_prompts[\"\"\"weights\"\"\"])\t\t\t)\r\t\t\t\t\t\t\t\t\tif neg_prompts:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Any =self._get_clip_similarity(neg_prompts[\"\"\"prompts\"\"\"]\t\t, lowerCAmelCase_\t\t, weights=neg_prompts[\"\"\"weights\"\"\"]\t\t\t)\r\t\t\t\t\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[int] =torch.tensor([1]\t\t, device=self.device\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[str] =-torch.log(lowerCAmelCase_\t\t\t) + torch.log(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\treturn loss\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tOptional[Any]\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tOptional[int]\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tTuple\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tDict\t\t\t) ->\t\tDict:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[int] =torch.randn_like(self.latent\t\t, requires_grad=lowerCAmelCase_\t\t, device=self.device\t\t\t)\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: str =torch.optim.Adam([vector]\t\t, lr=self.lr\t\t\t)\r\r\t\t\t\t\t\t\t\t\tfor i in range(self.iterations\t\t\t):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptim.zero_grad()\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: str =self._add_vector(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[Any] =loop_post_process(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[Any] =self._get_CLIP_loss(lowerCAmelCase_\t\t, lowerCAmelCase_\t\t, lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tprint(\"\"\"CLIP loss\"\"\"\t\t, lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif self.log:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twandb.log({\"\"\"CLIP Loss\"\"\": 
clip_loss}\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tclip_loss.backward(retain_graph=lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptim.step()\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif self.return_val == \"image\":\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tyield custom_to_pil(transformed_img[0]\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tyield vector\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tDict\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tDict\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tOptional[int]\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tDict\t\t\t) ->\t\tList[str]:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\twandb.init(reinit=lowerCAmelCase_\t\t, project=\"\"\"face-editor\"\"\"\t\t\t)\r\t\t\t\t\t\t\t\t\twandb.config.update({\"\"\"Positive Prompts\"\"\": positive_prompts}\t\t\t)\r\t\t\t\t\t\t\t\t\twandb.config.update({\"\"\"Negative Prompts\"\"\": negative_prompts}\t\t\t)\r\t\t\t\t\t\t\t\t\twandb.config.update({\"\"\"lr\"\"\": self.lr, \"\"\"iterations\"\"\": self.iterations}\t\t\t)\r\t\t\t\t\t\t\t\t\tif image_path:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: str =Image.open(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Optional[Any] =image.resize((2_56, 2_56)\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twandb.log(\"\"\"Original Image\"\"\"\t\t, wandb.Image(lowerCAmelCase_\t\t\t)\t\t\t)\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tList[str]\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tUnion[str, Any]\t\t\t) ->\t\tOptional[int]:\r\r\r\r\r\r\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\t\t\t\t\t\t\t\t\tif not prompts:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn []\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: List[Any] =[]\r\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: int =[]\r\t\t\t\t\t\t\t\t\tif isinstance(lowerCAmelCase_\t\t, lowerCAmelCase_\t\t\t):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Any =[prompt.strip() for prompt in prompts.split(\"\"\"|\"\"\"\t\t\t)]\r\t\t\t\t\t\t\t\t\tfor prompt in prompts:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif isinstance(lowerCAmelCase_\t\t, (tuple, list)\t\t\t):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Any =prompt[0]\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Tuple =float(prompt[1]\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \":\" in prompt:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__ ,\t\t\t\t\t\tA__\t\t\t\t\t: Dict =prompt.split(\"\"\":\"\"\"\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Union[str, Any] =float(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Union[str, Any] =prompt\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tA__\t\t\t\t\t: Dict =1.0\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tprocessed_prompts.append(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tweights.append(lowerCAmelCase_\t\t\t)\r\t\t\t\t\t\t\t\t\treturn {\r\t\t\t\t\t\t\t\t\t \"prompts\": processed_prompts,\r\t\t\t\t\t\t\t\t\t \"weights\": torch.tensor(lowerCAmelCase_\t\t, device=self.device\t\t\t),\r\t\t\t\t\t\t\t\t\t}\r\r\r\r\t\t\tdef lowercase__\t\t\t\t\t\t(\t\t\t\tself\t:\t\t\t\t\t\tTuple\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tUnion[str, Any]\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tDict=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tOptional[Any]=None\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tOptional[int]=True\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tstr=False\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tstr=True\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[str]=True\t\t, lowerCAmelCase_\t:\t\t\t\t\t\tList[str]=None\t\t, ) 
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Optimize the latent toward `pos_prompts` (and away from `neg_prompts`), showing/saving results."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
"},"code_codestyle":{"kind":"number","value":136,"string":"136"},"style_context":{"kind":"string","value":"
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test: return True iff the Mersenne number 2**p - 1 is prime."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
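# A quick sanity check for the test above: for p = 3 the Mersenne number is
# 2**3 - 1 = 7 and the sequence is s = 4, then (4*4 - 2) % 7 = 0, so 7 is
# reported prime; p = 11 gives 2047 = 23 * 89, which is correctly rejected.
def _lucas_lehmer_demo() -> None:
    """Illustrative only (hypothetical helper, not part of the original module)."""
    for p in (2, 3, 5, 7, 11, 13):
        print(p, lucas_lehmer_test(p))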
\"__main__\":\r\t\tprint(lucas_lehmer_test(7))\r\t\tprint(lucas_lehmer_test(11))\r\r"},"style_context_codestyle":{"kind":"number","value":136,"string":"136"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":818,"cells":{"code":{"kind":"string","value":"\r\n\r\n'''simple docstring'''\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom functools import lru_cache\r\nfrom math import ceil\r\n\r\nlowercase__\t\t\t =\t\t\t100\r\n\r\nlowercase__\t\t\t =\t\t\tset(range(3, NUM_PRIMES, 2))\r\nprimes.add(2)\r\nlowercase__\t\t\t =\t\t\t42\r\n\r\nfor prime in range(3, ceil(NUM_PRIMES**0.5), 2):\r\n if prime not in primes:\r\n continue\r\n primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))\r\n\r\n\r\n\r\n\r\n@lru_cache(maxsize=1_00\t)\r\ndef UpperCamelCase( UpperCAmelCase_\t):\r\n if number_to_partition < 0:\r\n return set()\r\n elif number_to_partition == 0:\r\n return {1}\r\n\r\n UpperCAmelCase :\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t= set()\r\n UpperCAmelCase :\tstr\t\t\t\t\t\t\t\t\t\t\t= 42\r\n UpperCAmelCase :\tDict\t\t\t\t\t\t\t\t\t\t\t= 42\r\n\r\n for prime in primes:\r\n if prime > number_to_partition:\r\n continue\r\n for sub in partition(number_to_partition - prime\t):\r\n ret.add(sub * prime\t)\r\n\r\n return ret\r\n\r\n\r\n\r\n\r\ndef UpperCamelCase( UpperCAmelCase_ = 50_00\t):\r\n for number_to_partition in range(1 , __lowercase\t):\r\n if len(partition(__lowercase\t)\t) > number_unique_partitions:\r\n return number_to_partition\r\n return None\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(f'''{solution() = }''')\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":151,"string":"151"},"style_context":{"kind":"string","value":"\rimport unittest\rfrom typing import Tuple\r\rimport torch\r\rfrom diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device\rfrom diffusers.utils.testing_utils import require_torch\r\r\r\r\r@require_torch\rclass SCREAMING_SNAKE_CASE__ :\r\r\r\r\r\r\t'''simple docstring'''\r\r\r\r\t@property\r\tdef A\t\t\t\t\t\t\t( self\t: List[str]\t):\r\r\r\t\t\t\t'''simple docstring'''\r\r\r\t\t\t\treturn self.get_dummy_input()\r\r\r\r\t@property\r\tdef A\t\t\t\t\t\t\t( self\t: Any\t):\r\r\r\t\t\t\t'''simple docstring'''\r\r\r\t\t\t\tif self.block_type == \"down\":\r\t\t\t\t\t\t\treturn (4, 32, 16, 16)\r\t\t\t\telif self.block_type == \"mid\":\r\t\t\t\t\t\t\treturn (4, 32, 32, 32)\r\t\t\t\telif self.block_type == \"up\":\r\t\t\t\t\t\t\treturn (4, 32, 64, 64)\r\r\t\t\t\traise ValueError(f'''\\'{self.block_type}\\' is not a supported block_type. 
"},"code_codestyle":{"kind":"number","value":151,"string":"151"},"style_context":{"kind":"string","value":"
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor((batch_size, 3) + sizes, generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
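    # How a concrete test would use this mixin (illustrative sketch; the class
    # name and slice values below are hypothetical, not from this file):
    #
    #     class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    #         block_class = DownBlock2D  # imported from diffusers
    #         block_type = "down"
    #
    #         def test_output(self):
    #             expected_slice = [-0.0232, 0.1451, ...]  # nine placeholder floats
    #             super().test_output(expected_slice)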
    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
"},"style_context_codestyle":{"kind":"number","value":282,"string":"282"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":819,"cells":{"code":{"kind":"string","value":"
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
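# The try/except above is the diffusers soft-dependency pattern: when torch or
# transformers is missing, dummy stand-ins that raise a helpful ImportError on
# use are exported instead of the real classes. A minimal standalone sketch of
# the same idea (hypothetical names, not part of this package):
#
#     try:
#         import torch  # noqa: F401
#     except ImportError:
#         class UniDiffuserPipeline:
#             def __init__(self, *args, **kwargs):
#                 raise ImportError("UniDiffuserPipeline requires torch")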
"},"code_codestyle":{"kind":"number","value":133,"string":"133"},"style_context":{"kind":"string","value":"
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)


failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
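# Each *.log file is expected to hold pytest-json output, one JSON object per
# line. A minimal sketch of parsing one such line (the sample value below is
# illustrative, not taken from a real run):
def _demo_parse_log_line() -> None:
    sample = '{"nodeid": "tests/test_x.py::TestX::test_y", "duration": 0.0123, "outcome": "failed"}'
    entry = json.loads(sample)
    print(entry["nodeid"], f'{entry["duration"]:.4f}', entry["outcome"])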
message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {"type": "mrkdwn", "text": message},
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {"type": "mrkdwn", "text": "*For more details:*"},
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""

            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }

            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
"},"style_context_codestyle":{"kind":"number","value":133,"string":"133"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":821,"cells":{"code":{"kind":"string","value":"
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Straight-line distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """k-nearest-neighbours: classify `point` by majority vote of its k closest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
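# Worked example for the majority vote above: with k = 5, neighbour labels
# [0, 0, 0, 1, 2] give Counter(...).most_common(1)[0][0] == 0, so the point is
# labelled classes[0] ("setosa" in the iris dataset).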
"},"code_codestyle":{"kind":"number","value":359,"string":"359"},"style_context":{"kind":"string","value":"
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
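# Worked example for the scheduler above, assuming time_slices = [17, 25]:
# a process with burst_time 53 gets 17 ticks in queue 0 (round robin),
# 25 more in queue 1, and its final 11 ticks in the last, FCFS queue.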
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
"},"style_context_codestyle":{"kind":"number","value":181,"string":"181"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":822,"cells":{"code":{"kind":"string","value":"
def count_divisors(n):
    """Count divisors of n from its prime factorization: multiply (multiplicity + 1) over all primes."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangular number with more than 500 divisors."""
    t_num = 1
    i = 1

    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num


if __name__ == "__main__":
    print(solution())
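# Worked example for count_divisors above: 28 = 2**2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
def _count_divisors_demo() -> None:
    """Illustrative only (hypothetical helper, not part of the original solution)."""
    print(count_divisors(28))  # expected: 6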
"},"code_codestyle":{"kind":"number","value":329,"string":"329"},"style_context":{"kind":"string","value":"
import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
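# A small worked example of the WordPiece behaviour exercised above: with the
# toy vocab, "UNwant\u00E9d,running" normalizes to "unwanted,running" and splits
# into ["un", "##want", "##ed", ",", "runn", "##ing"], where "##" marks a
# continuation of the previous word piece.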
"},"style_context_codestyle":{"kind":"number","value":176,"string":"176"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":823,"cells":{"code":{"kind":"string","value":"
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
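    # For reference: `ids_tensor` (from test_modeling_common) draws random ids
    # of the given shape, so `input_ids` above is (batch_size, seq_length) =
    # (13, 7) with values in [0, vocab_size), and `random_attention_mask`
    # forces the last position of each row to 1 so no row is fully masked.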
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)
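    # Note on test_model_as_decoder_with_default_input_mask above: passing
    # input_mask=None exercises the path where the model builds its own
    # attention mask; per the inline comment, this regression used to fail on
    # PyTorch < 1.3.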
List[str]:\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tself.model_tester.prepare_config_and_inputs()\n\t\t\t\t\t\tself.model_tester.create_and_check_for_pretraining(*__A)\n\n\n\n\n\n\n\t\t\tdef \t\t\t\tSCREAMING_SNAKE_CASE__ ( self) -> List[Any]:\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tself.model_tester.prepare_config_and_inputs()\n\t\t\t\t\t\tself.model_tester.create_and_check_for_question_answering(*__A)\n\n\n\n\n\n\n\t\t\tdef \t\t\t\tSCREAMING_SNAKE_CASE__ ( self) -> List[Any]:\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tself.model_tester.prepare_config_and_inputs()\n\t\t\t\t\t\tself.model_tester.create_and_check_for_sequence_classification(*__A)\n\n\n\n\n\n\n\t\t\tdef \t\t\t\tSCREAMING_SNAKE_CASE__ ( self) -> Any:\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tself.model_tester.prepare_config_and_inputs()\n\t\t\t\t\t\tself.model_tester.create_and_check_for_token_classification(*__A)\n\n\n\n\n\n\n\t\t\t@slow\n\t\t\tdef \t\t\t\tSCREAMING_SNAKE_CASE__ ( self) -> Tuple:\n\t\t\t\t\t\tfor model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tNezhaModel.from_pretrained(__A)\n\t\t\t\t\t\t\t\t\tself.assertIsNotNone(__A)\n\n\n\n\n\n\n\n\t\t\t@slow\n\t\t\t@require_torch_gpu\n\t\t\tdef \t\t\t\tSCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tself.model_tester.prepare_config_and_inputs_for_common()\n\t\t\t\t\t\tfor model_class in self.all_model_classes:\n\t\t\t\t\t\t\t\t\t# NezhaForMultipleChoice behaves incorrectly in JIT environments.\n\t\t\t\t\t\t\t\t\tif model_class == NezhaForMultipleChoice:\n\t\t\t\t\t\t\t\t\t\t\t\treturn\n\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tTrue\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tmodel_class(config=__A)\n\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tself._prepare_for_class(__A\t\t\t\t\t\t, __A)\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\ttorch.jit.trace(\n\t\t\t\t\t\t\t\t\t __A\t\t\t\t\t\t, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))\n\n\t\t\t\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmp:\n\t\t\t\t\t\t\t\t\t\t\t\ttorch.jit.save(__A\t\t\t\t\t\t, os.path.join(__A\t\t\t\t\t\t, 'bert.pt'))\n\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\ttorch.jit.load(os.path.join(__A\t\t\t\t\t\t, 'bert.pt')\t\t\t\t\t\t, map_location=__A)\n\t\t\t\t\t\t\t\t\t\t\t\tloaded(inputs_dict['input_ids'].to(__A)\t\t\t\t\t\t, inputs_dict['attention_mask'].to(__A))\n\n\n\n\n@require_torch\nclass \t\t\t\t\t\t_snake_case\t\t\t\t\t(\t\t\t\t\tunittest.TestCase ):\n\n\n\n\n\n\n\t\t\t@slow\n\t\t\tdef \t\t\t\tSCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tNezhaModel.from_pretrained('sijunhe/nezha-cn-base')\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\ttorch.tensor([[0, 1, 2, 3, 4, 5]])\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\ttorch.tensor([[0, 1, 1, 1, 1, 1]])\n\t\t\t\t\t\twith torch.no_grad():\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\tmodel(__A\t\t\t\t\t\t, attention_mask=__A)[0]\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\ttorch.Size((1, 6, 768))\n\t\t\t\t\t\tself.assertEqual(output.shape\t\t\t\t\t\t, __A)\n\t\t\t\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t\t\t=\ttorch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]])\n\n\t\t\t\t\t\tself.assertTrue(torch.allclose(output[:, 
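
# A minimal, self-contained illustration (toy sizes, independent of the test suite
# above) of the unsqueeze/expand pattern the multiple-choice test relies on: a
# (batch, seq_len) tensor is broadcast to (batch, num_choices, seq_len) without
# copying data until .contiguous() materializes it.
import torch

batch_size, seq_length, num_choices = 2, 7, 4
toy_input_ids = torch.randint(0, 100, (batch_size, seq_length))
expanded = toy_input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch_size, num_choices, seq_length)
assert torch.equal(expanded[:, 0, :], toy_input_ids)  # every choice slot sees the same ids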

# =============================================================================

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))

# =============================================================================

import gc
import unittest

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusionControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
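
# A self-contained sketch (toy function, no diffusers involved) of the data-parallel
# pattern the tests above use: parameters are replicated across devices, per-example
# inputs are sharded along the batch axis, and the function runs once per device
# under jax.pmap.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

toy_params = {"w": jnp.ones((3,))}
toy_inputs = jnp.arange(jax.device_count() * 3, dtype=jnp.float32).reshape(-1, 3)

p_toy_params = replicate(toy_params)   # adds a leading device axis to every leaf
sharded_inputs = shard(toy_inputs)     # splits the batch axis across devices

p_apply = jax.pmap(lambda p, x: x * p["w"])
out = p_apply(p_toy_params, sharded_inputs)
assert out.shape[0] == jax.device_count()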
\"xlarge\" in model_name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= 256\n\t\t\t\t\t\telif \"huge\" in model_name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= 352\n\n\t\t\t\t\t\t# set label information\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= '''huggingface/label-files'''\n\t\t\t\t\t\tif \"large\" in model_name or \"huge\" in model_name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= '''imagenet-22k-id2label.json'''\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= '''imagenet-1k-id2label.json'''\n\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= {int(_A ): v for k, v in idalabel.items()}\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= {v: k for k, v in idalabel.items()}\n\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= FocalNetConfig(\n\t\t\t\t\t\t embed_dim=_A , depths=_A , focal_levels=_A , focal_windows=_A , use_conv_embed=_A , idalabel=_A , labelaid=_A , use_post_layernorm=_A , use_layerscale=_A , )\n\n\t\t\t\t\t\treturn config\n\n\n\n\n\ndef __UpperCamelCase\t\t\t( _A ):\n\t\t\t\t\t\tif \"patch_embed.proj\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )\n\t\t\t\t\t\tif \"patch_embed.norm\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= name.replace('''patch_embed.norm''' , '''embeddings.norm''' )\n\t\t\t\t\t\tif \"layers\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= '''encoder.''' + name\n\t\t\t\t\t\tif \"encoder.layers\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= name.replace('''encoder.layers''' , '''encoder.stages''' )\n\t\t\t\t\t\tif \"downsample.proj\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= name.replace('''downsample.proj''' , '''downsample.projection''' )\n\t\t\t\t\t\tif \"blocks\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= name.replace('''blocks''' , '''layers''' )\n\t\t\t\t\t\tif \"modulation.f.weight\" in name or \"modulation.f.bias\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= name.replace('''modulation.f''' , '''modulation.projection_in''' )\n\t\t\t\t\t\tif \"modulation.h.weight\" in name or \"modulation.h.bias\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= name.replace('''modulation.h''' , '''modulation.projection_context''' )\n\t\t\t\t\t\tif \"modulation.proj.weight\" in name or \"modulation.proj.bias\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= name.replace('''modulation.proj''' , '''modulation.projection_out''' )\n\n\t\t\t\t\t\tif name == \"norm.weight\":\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= '''layernorm.weight'''\n\t\t\t\t\t\tif name == \"norm.bias\":\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= '''layernorm.bias'''\n\n\t\t\t\t\t\tif \"head\" in name:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= name.replace('''head''' , '''classifier''' )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= '''focalnet.''' + name\n\n\t\t\t\t\t\treturn name\n\n\n\n\n\ndef __UpperCamelCase\t\t\t( _A , _A , _A=False ):\n\t\t\t\t\t\t# fmt: off\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= {\n\t\t\t\t\t\t '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',\n\t\t\t\t\t\t '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',\n\t\t\t\t\t\t '''focalnet-small''': 
'''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',\n\t\t\t\t\t\t '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',\n\t\t\t\t\t\t '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',\n\t\t\t\t\t\t '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',\n\t\t\t\t\t\t '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',\n\t\t\t\t\t\t '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',\n\t\t\t\t\t\t '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',\n\t\t\t\t\t\t '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',\n\t\t\t\t\t\t}\n\t\t\t\t\t\t# fmt: on\n\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= model_name_to_url[model_name]\n\t\t\t\t\t\tprint('''Checkpoint URL: ''' , _A )\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''model''']\n\n\t\t\t\t\t\t# rename keys\n\t\t\t\t\t\tfor key in state_dict.copy().keys():\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= state_dict.pop(_A )\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= val\n\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= get_focalnet_config(_A )\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= FocalNetForImageClassification(_A )\n\t\t\t\t\t\tmodel.eval()\n\n\t\t\t\t\t\t# load state dict\n\t\t\t\t\t\tmodel.load_state_dict(_A )\n\n\t\t\t\t\t\t# verify conversion\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= '''http://images.cocodataset.org/val2017/000000039769.jpg'''\n\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= BitImageProcessor(\n\t\t\t\t\t\t do_resize=_A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_A , crop_size=224 , do_normalize=_A , image_mean=_A , image_std=_A , )\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= Image.open(requests.get(_A , stream=_A ).raw )\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= processor(images=_A , return_tensors='''pt''' )\n\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= transforms.Compose(\n\t\t\t\t\t\t [\n\t\t\t\t\t\t transforms.Resize(256 ),\n\t\t\t\t\t\t transforms.CenterCrop(224 ),\n\t\t\t\t\t\t transforms.ToTensor(),\n\t\t\t\t\t\t transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),\n\t\t\t\t\t\t ] )\n\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= image_transforms(_A ).unsqueeze(0 )\n\n\t\t\t\t\t\t# verify pixel_values\n\t\t\t\t\t\tassert torch.allclose(inputs.pixel_values , _A , atol=1E-4 )\n\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= model(**_A )\n\n\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= outputs.logits.argmax(-1 ).item()\n\t\t\t\t\t\tprint('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )\n\n\t\t\t\t\t\tprint('''First values of logits:''' , outputs.logits[0, :3] )\n\n\t\t\t\t\t\tif model_name == \"focalnet-tiny\":\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )\n\t\t\t\t\t\telif model_name == \"focalnet-tiny-lrf\":\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )\n\t\t\t\t\t\telif model_name == 
\"focalnet-small\":\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )\n\t\t\t\t\t\telif model_name == \"focalnet-small-lrf\":\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )\n\t\t\t\t\t\telif model_name == \"focalnet-base\":\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )\n\t\t\t\t\t\telif model_name == \"focalnet-base-lrf\":\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t= torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )\n\t\t\t\t\t\tassert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 )\n\t\t\t\t\t\tprint('''Looks ok!''' )\n\n\t\t\t\t\t\tif pytorch_dump_folder_path is not None:\n\t\t\t\t\t\t\t\t\t\t\t\tprint(f\"Saving model and processor of {model_name} to {pytorch_dump_folder_path}\" )\n\t\t\t\t\t\t\t\t\t\t\t\tmodel.save_pretrained(_A )\n\t\t\t\t\t\t\t\t\t\t\t\tprocessor.save_pretrained(_A )\n\n\t\t\t\t\t\tif push_to_hub:\n\t\t\t\t\t\t\t\t\t\t\t\tprint(f\"Pushing model and processor of {model_name} to the hub...\" )\n\t\t\t\t\t\t\t\t\t\t\t\tmodel.push_to_hub(f\"{model_name}\" )\n\t\t\t\t\t\t\t\t\t\t\t\tprocessor.push_to_hub(f\"{model_name}\" )\n\n\nif __name__ == \"__main__\":\n\t\t\t\t\t\t_A =\t\t\targparse.ArgumentParser()\n\t\t\t\t\t\t# Required parameters\n\t\t\t\t\t\tparser.add_argument(\n\t\t\t\t\t\t '''--model_name''',\n\t\t\t\t\t\t default='''focalnet-tiny''',\n\t\t\t\t\t\t type=str,\n\t\t\t\t\t\t help='''Name of the FocalNet model you\\'d like to convert.''',\n\t\t\t\t\t\t)\n\t\t\t\t\t\tparser.add_argument(\n\t\t\t\t\t\t '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''\n\t\t\t\t\t\t)\n\t\t\t\t\t\tparser.add_argument(\n\t\t\t\t\t\t '''--push_to_hub''',\n\t\t\t\t\t\t action='''store_true''',\n\t\t\t\t\t\t help='''Whether to push the model and processor to the hub.''',\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\t_A =\t\t\tparser.parse_args()\n\t\t\t\t\t\tconvert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":278,"string":"278"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":825,"cells":{"code":{"kind":"string","value":"\rimport importlib\rimport inspect\rimport os\rimport re\r\r\r# All paths are set with the intent you should run this script from the root of the repo with the command\r# python utils/check_config_docstrings.py\r__UpperCAmelCase \t\t= \"src/transformers\"\r\r\r# This is to make sure the transformers module imported is the one in the repo.\r__UpperCAmelCase \t\t= importlib.util.spec_from_file_location(\r \"transformers\",\r os.path.join(PATH_TO_TRANSFORMERS, \"__init__.py\"),\r submodule_search_locations=[PATH_TO_TRANSFORMERS],\r)\r__UpperCAmelCase \t\t= spec.loader.load_module()\r\r__UpperCAmelCase \t\t= transformers.models.auto.configuration_auto.CONFIG_MAPPING\r\r# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.\r# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`\r__UpperCAmelCase \t\t= re.compile(\"\\[(.+?)\\]\\((https://huggingface\\.co/.+?)\\)\")\r\r\r__UpperCAmelCase \t\t= {\r \"CLIPConfigMixin\",\r \"DecisionTransformerConfigMixin\",\r \"EncoderDecoderConfigMixin\",\r \"RagConfigMixin\",\r \"SpeechEncoderDecoderConfigMixin\",\r \"VisionEncoderDecoderConfigMixin\",\r \"VisionTextDualEncoderConfigMixin\",\r}\r\r\rdef \t\t\t\tA__\t\t\t\t( ):\r 

# =============================================================================

import importlib
import inspect
import os
import re

# All paths are set with the intent you should run this script from the root of the repo
# with the command: python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()

# =============================================================================

from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
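
# Quick usage check (hypothetical sample data) for the radix sort above. Note that it
# sorts in place and also returns the list; each pass is O(n), and there is one pass
# per decimal digit of the maximum value.
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]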
\"\"\"C\"\"\"])\r\t\t\t\t\t\t\tgenerate_all_subsequences(seq)"},"code_codestyle":{"kind":"number","value":86,"string":"86"},"style_context":{"kind":"string","value":"\r\n\r\n\r\nfrom transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef SCREAMING_SNAKE_CASE_\t\t\t\t\t\t(\t\t\t)\t->\t\tAny:\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n a_\t\t\t\t\t:\t\tOptional[Any]\t\t\t\t\t\t\t= HfArgumentParser(__A\t\t\t\t)\r\n a_\t\t\t\t\t:\t\tOptional[int]\t\t\t\t\t\t\t= parser.parse_args_into_dataclasses()[0]\r\n a_\t\t\t\t\t:\t\tList[Any]\t\t\t\t\t\t\t= TensorFlowBenchmark(args=__A\t\t\t\t)\r\n try:\r\n a_\t\t\t\t\t:\t\tList[str]\t\t\t\t\t\t\t= parser.parse_args_into_dataclasses()[0]\r\n except ValueError as e:\r\n a_\t\t\t\t\t:\t\tDict\t\t\t\t\t\t\t= 'Arg --no_{0} is no longer used, please use --no-{0} instead.'\r\n a_\t\t\t\t\t:\t\tDict\t\t\t\t\t\t\t= ' '.join(str(__A\t\t\t\t).split(' '\t\t\t\t)[:-1]\t\t\t\t)\r\n a_\t\t\t\t\t:\t\tint\t\t\t\t\t\t\t= ''\r\n a_\t\t\t\t\t:\t\tint\t\t\t\t\t\t\t= eval(str(__A\t\t\t\t).split(' '\t\t\t\t)[-1]\t\t\t\t)\r\n a_\t\t\t\t\t:\t\tAny\t\t\t\t\t\t\t= []\r\n for arg in depreciated_args:\r\n # arg[2:] removes '--'\r\n if arg[2:] in TensorFlowBenchmark.deprecated_args:\r\n # arg[5:] removes '--no_'\r\n full_error_msg += arg_error_msg.format(arg[5:]\t\t\t\t)\r\n else:\r\n wrong_args.append(__A\t\t\t\t)\r\n if len(__A\t\t\t\t) > 0:\r\n a_\t\t\t\t\t:\t\tstr\t\t\t\t\t\t\t= full_error_msg + begin_error_msg + str(__A\t\t\t\t)\r\n raise ValueError(__A\t\t\t\t)\r\n benchmark.run()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":32,"string":"32"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":827,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\nimport gc\r\nimport random\r\nimport unittest\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom PIL import Image\r\nfrom transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer\r\n\r\nfrom diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel\r\nfrom diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device\r\nfrom diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu\r\n\r\n\r\nenable_full_determinism()\r\n\r\nclass \t\tSCREAMING_SNAKE_CASE__ (\tunittest.TestCase ):\r\n\r\n\r\n def a\t\t\t(self\t:\t\t\t\tAny\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n super().tearDown()\r\n gc.collect()\r\n torch.cuda.empty_cache()\r\n\r\n\r\n @property\r\n def a\t\t\t(self\t:\t\t\t\tint\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n __snake_case\t\t\t\t\t\t\t\t= 1\r\n __snake_case\t\t\t\t\t\t\t\t= 3\r\n __snake_case\t\t\t\t\t\t\t\t= (32, 32)\r\n\r\n __snake_case\t\t\t\t\t\t\t\t= floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0\t\t\t\t)\t\t\t\t).to(__lowercase\t\t\t\t)\r\n return image\r\n\r\n\r\n @property\r\n def a\t\t\t(self\t:\t\t\t\tList[str]\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n torch.manual_seed(0\t\t\t\t)\r\n __snake_case\t\t\t\t\t\t\t\t= UNetaDConditionModel(\r\n block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', 

# =============================================================================

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

enable_full_determinism()


class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9

# =============================================================================

def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2 ** power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
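
# Equivalent one-liner for the digit sum above, offered as an alternative: iterate the
# decimal string directly instead of materializing an intermediate list.
assert solution(15) == 26  # 2**15 = 32768 -> 3 + 2 + 7 + 6 + 8 = 26
assert sum(int(d) for d in str(2**15)) == 26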

# =============================================================================

def naive_cut_rod_recursive(n: int, prices: list) -> float:
    """Exponential-time top-down recursion without memoization."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revenue


def top_down_cut_rod(n: int, prices: list) -> float:
    """Top-down dynamic programming: memoize the best revenue for each rod length."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list) -> float:
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev)
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list) -> float:
    """Bottom-up dynamic programming over increasing rod lengths."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
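
# Quick sanity check of the three implementations on a second price table (hypothetical
# values). Complexity: the naive recursion is O(2**n); both DP variants are O(n**2).
toy_prices = [1, 5, 8, 9]
assert (
    naive_cut_rod_recursive(4, toy_prices)
    == top_down_cut_rod(4, toy_prices)
    == bottom_up_cut_rod(4, toy_prices)
    == 10  # best cut is 2 + 2, worth 5 + 5
)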
\"__main__\":\r\t\t\t\t\t\tfire.Fire(minify)\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":103,"string":"103"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":829,"cells":{"code":{"kind":"string","value":"\rimport argparse\rimport json\rimport os\r\rimport fairseq\rimport torch\rfrom fairseq.data import Dictionary\r\rfrom transformers import (\r HubertConfig,\r HubertForCTC,\r HubertModel,\r WavaVecaCTCTokenizer,\r WavaVecaFeatureExtractor,\r WavaVecaProcessor,\r logging,\r)\r\r\rlogging.set_verbosity_info()\rSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:str\t\t\t\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\rSCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:List[Any]\t\t\t\t\t\t\t\t\t\t= {\r 'post_extract_proj': 'feature_projection.projection',\r 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',\r 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',\r 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',\r 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',\r 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',\r 'self_attn_layer_norm': 'encoder.layers.*.layer_norm',\r 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',\r 'fc2': 'encoder.layers.*.feed_forward.output_dense',\r 'final_layer_norm': 'encoder.layers.*.final_layer_norm',\r 'encoder.layer_norm': 'encoder.layer_norm',\r 'w2v_model.layer_norm': 'feature_projection.layer_norm',\r 'w2v_encoder.proj': 'lm_head',\r 'mask_emb': 'masked_spec_embed',\r}\r\r\rdef \t\t\t\t\t\tUpperCAmelCase ( a_\t\t, a_\t\t, a_\t\t, a_\t\t, a_\t\t\t\t\t\t)\t\t-> Optional[Any]:\r\r\r\r\r\r\r\r\t\"\"\"simple docstring\"\"\"\r\r\r\r\tfor attribute in key.split(\".\"\t\t\t\t\t\t):\r\t\t__A =\t\t\tgetattr(a_\t\t, a_\t\t\t\t\t\t)\r\r\tif weight_type is not None:\r\t\t__A =\t\t\tgetattr(a_\t\t, a_\t\t\t\t\t\t).shape\r\telse:\r\t\t__A =\t\t\thf_pointer.shape\r\r\tassert hf_shape == value.shape, (\r\t F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''\r\t F''' {value.shape} for {full_name}'''\r\t)\r\r\tif weight_type == \"weight\":\r\t\t__A =\t\t\tvalue\r\telif weight_type == \"weight_g\":\r\t\t__A =\t\t\tvalue\r\telif weight_type == \"weight_v\":\r\t\t__A =\t\t\tvalue\r\telif weight_type == \"bias\":\r\t\t__A =\t\t\tvalue\r\telse:\r\t\t__A =\t\t\tvalue\r\r\tlogger.info(F'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''')


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every tensor in the fairseq state dict onto the transformers HuBERT model."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq HuBERT checkpoint into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change: bos & pad token ids are swapped because the CTC
            # symbol is <pad>, not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
# Helper for building short, collision-free names for hyperparameter trials.
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the counter, otherwise a collision loops forever
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separator-less short name, but if there is a
        # collision we have to fall back to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
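# Minimal usage sketch for TrialShortNamer (the subclass and values below are
# illustrative assumptions, not part of the class above):
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
#   # -> roughly "run_lr0.0001": batch_size is omitted because it matches the
#   #    default, and "learning_rate" shortens to "lr"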
\"sentencepiece.bpe.model\",\r\n}\r\n\r\nUpperCamelCase__: Any\t\t\t\t\t\t\t\t=\t\t\t{\r\n \"vocab_file\": {\r\n \"facebook/s2t-small-librispeech-asr\": (\r\n \"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json\"\r\n ),\r\n },\r\n \"spm_file\": {\r\n \"facebook/s2t-small-librispeech-asr\": (\r\n \"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model\"\r\n )\r\n },\r\n}\r\n\r\nUpperCamelCase__: Optional[int]\t\t\t\t\t\t\t\t=\t\t\t{\r\n \"facebook/s2t-small-librispeech-asr\": 1024,\r\n}\r\n\r\nUpperCamelCase__: Union[str, Any]\t\t\t\t\t\t\t\t=\t\t\t[\"pt\", \"fr\", \"ru\", \"nl\", \"ro\", \"it\", \"es\", \"de\"]\r\n\r\nUpperCamelCase__: Optional[int]\t\t\t\t\t\t\t\t=\t\t\t{\"mustc\": MUSTC_LANGS}\r\n\r\nclass \t\t\t\tSCREAMING_SNAKE_CASE(\t\t\t\t\t\t\tA__\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tlowerCamelCase__\t\t\t = VOCAB_FILES_NAMES\r\n\tlowerCamelCase__\t\t\t = PRETRAINED_VOCAB_FILES_MAP\r\n\tlowerCamelCase__\t\t\t = MAX_MODEL_INPUT_SIZES\r\n\tlowerCamelCase__\t\t\t = [\"\"\"input_ids\"\"\", \"\"\"attention_mask\"\"\"]\r\n\r\n\tlowerCamelCase__\t\t\t = []\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef __init__(\t\t\t\t\t\tself\t\t\t: List[Any]\t\t,\t\t\t__snake_case\t\t\t: str\t\t,\t\t\t__snake_case\t\t\t: Any\t\t,\t\t\t__snake_case\t\t\t: List[str]=\"\"\t\t,\t\t\t__snake_case\t\t\t: str=\"\"\t\t,\t\t\t__snake_case\t\t\t: str=\"\"\t\t,\t\t\t__snake_case\t\t\t: Any=\"\"\t\t,\t\t\t__snake_case\t\t\t: str=False\t\t,\t\t\t__snake_case\t\t\t: List[Any]=False\t\t,\t\t\t__snake_case\t\t\t: List[Any]=None\t\t,\t\t\t__snake_case\t\t\t: Optional[Any]=None\t\t,\t\t\t__snake_case\t\t\t: Optional[Dict[str, Any]] = None\t\t,\t\t\t**__snake_case\t\t\t: Optional[Any]\t\t,\t\t\t)\t\t\t->\t\tNone:\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Tuple \t\t\t\t\t\t\t=\t\t{} if sp_model_kwargs is None else sp_model_kwargs\r\n\r\n\t\t\t\t\t\t\t\tsuper().__init__(\r\n\t\t\t\t\t\t\t\t bos_token=__snake_case\t\t,\t\t\teos_token=__snake_case\t\t,\t\t\tunk_token=__snake_case\t\t,\t\t\tpad_token=__snake_case\t\t,\t\t\tdo_upper_case=__snake_case\t\t,\t\t\tdo_lower_case=__snake_case\t\t,\t\t\ttgt_lang=__snake_case\t\t,\t\t\tlang_codes=__snake_case\t\t,\t\t\tsp_model_kwargs=self.sp_model_kwargs\t\t,\t\t\t**__snake_case\t\t,\t\t\t)\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Union[str, Any] \t\t\t\t\t\t\t=\t\tdo_upper_case\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : int \t\t\t\t\t\t\t=\t\tdo_lower_case\r\n\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Dict \t\t\t\t\t\t\t=\t\tload_json(__snake_case\t\t)\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Union[str, Any] \t\t\t\t\t\t\t=\t\t{v: k for k, v in self.encoder.items()}\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Tuple \t\t\t\t\t\t\t=\t\tspm_file\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Any \t\t\t\t\t\t\t=\t\tload_spm(__snake_case\t\t,\t\t\tself.sp_model_kwargs\t\t)\r\n\r\n\t\t\t\t\t\t\t\tif lang_codes is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : List[str] \t\t\t\t\t\t\t=\t\tlang_codes\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : int \t\t\t\t\t\t\t=\t\tLANGUAGES[lang_codes]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : Any \t\t\t\t\t\t\t=\t\t[F\"\"\"\"\"\" for lang in self.langs]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : Optional[int] \t\t\t\t\t\t\t=\t\t{lang: self.sp_model.PieceToId(F\"\"\"\"\"\"\t\t) for lang in self.langs}\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : Optional[int] 
\t\t\t\t\t\t\t=\t\tself.lang_tokens\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : List[str] \t\t\t\t\t\t\t=\t\ttgt_lang if tgt_lang is not None else self.langs[0]\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.set_tgt_lang_special_tokens(self._tgt_lang\t\t)\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : str \t\t\t\t\t\t\t=\t\t{}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t@property\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: Dict\t\t)\t\t\t->\t\tint:\r\n\t\t\t\t\t\t\t\treturn len(self.encoder\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t@property\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: Optional[Any]\t\t)\t\t\t->\t\tstr:\r\n\t\t\t\t\t\t\t\treturn self._tgt_lang\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t@tgt_lang.setter\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: List[str]\t\t,\t\t\t__snake_case\t\t\t: int\t\t)\t\t\t->\t\tNone:\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : List[Any] \t\t\t\t\t\t\t=\t\tnew_tgt_lang\r\n\t\t\t\t\t\t\t\tself.set_tgt_lang_special_tokens(__snake_case\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: int\t\t,\t\t\t__snake_case\t\t\t: str\t\t)\t\t\t->\t\tNone:\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Optional[Any] \t\t\t\t\t\t\t=\t\tself.lang_code_to_id[tgt_lang]\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Any \t\t\t\t\t\t\t=\t\t[lang_code_id]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: Tuple\t\t,\t\t\t__snake_case\t\t\t: str\t\t)\t\t\t->\t\tList[str]:\r\n\t\t\t\t\t\t\t\treturn self.sp_model.encode(__snake_case\t\t,\t\t\tout_type=__snake_case\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: str\t\t,\t\t\t__snake_case\t\t\t: Union[str, Any]\t\t)\t\t\t->\t\tUnion[str, Any]:\r\n\t\t\t\t\t\t\t\treturn self.encoder.get(__snake_case\t\t,\t\t\tself.encoder[self.unk_token]\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: str\t\t,\t\t\t__snake_case\t\t\t: int\t\t)\t\t\t->\t\tstr:\r\n\t\t\t\t\t\t\t\treturn self.decoder.get(__snake_case\t\t,\t\t\tself.unk_token\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: Tuple\t\t,\t\t\t__snake_case\t\t\t: List[str]\t\t)\t\t\t->\t\tstr:\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : List[Any] \t\t\t\t\t\t\t=\t\t[]\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Tuple \t\t\t\t\t\t\t=\t\t''''''\r\n\t\t\t\t\t\t\t\tfor token in tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# make sure that special tokens are not decoded using sentencepiece model\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif token in self.all_special_tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : Any \t\t\t\t\t\t\t=\t\tself.sp_model.decode(__snake_case\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tout_string += (decoded.upper() if self.do_upper_case else decoded) + token + \" \"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : List[Any] \t\t\t\t\t\t\t=\t\t[]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcurrent_sub_tokens.append(__snake_case\t\t)\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Optional[Any] \t\t\t\t\t\t\t=\t\tself.sp_model.decode(__snake_case\t\t)\r\n\t\t\t\t\t\t\t\tout_string += decoded.upper() if self.do_upper_case else decoded\r\n\t\t\t\t\t\t\t\treturn out_string.strip()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: Any\t\t,\t\t\t__snake_case\t\t\t: List[str]\t\t,\t\t\t__snake_case\t\t\t: Optional[int]=None\t\t)\t\t\t->\t\tList[int]:\r\n\t\t\t\t\t\t\t\tif token_ids_a is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn self.prefix_tokens + token_ids_a + [self.eos_token_id]\r\n\t\t\t\t\t\t\t\t# We don't expect to process pairs, but leave the 
pair logic for API consistency\r\n\t\t\t\t\t\t\t\treturn self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: int\t\t,\t\t\t__snake_case\t\t\t: List[int]\t\t,\t\t\t__snake_case\t\t\t: Optional[List[int]] = None\t\t,\t\t\t__snake_case\t\t\t: bool = False\t\t)\t\t\t->\t\tList[int]:\r\n\r\n\t\t\t\t\t\t\t\tif already_has_special_tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn super().get_special_tokens_mask(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t token_ids_a=__snake_case\t\t,\t\t\ttoken_ids_a=__snake_case\t\t,\t\t\talready_has_special_tokens=__snake_case\t\t)\r\n\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Tuple \t\t\t\t\t\t\t=\t\t[1] * len(self.prefix_tokens\t\t)\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Optional[int] \t\t\t\t\t\t\t=\t\t[1]\r\n\t\t\t\t\t\t\t\tif token_ids_a is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn prefix_ones + ([0] * len(__snake_case\t\t)) + suffix_ones\r\n\t\t\t\t\t\t\t\treturn prefix_ones + ([0] * len(__snake_case\t\t)) + ([0] * len(__snake_case\t\t)) + suffix_ones\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: Union[str, Any]\t\t)\t\t\t->\t\tDict:\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : List[Any] \t\t\t\t\t\t\t=\t\tself.encoder.copy()\r\n\t\t\t\t\t\t\t\tvocab.update(self.added_tokens_encoder\t\t)\r\n\t\t\t\t\t\t\t\treturn vocab\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef __getstate__(\t\t\t\t\t\tself\t\t\t: Union[str, Any]\t\t)\t\t\t->\t\tDict:\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Optional[int] \t\t\t\t\t\t\t=\t\tself.__dict__.copy()\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : str \t\t\t\t\t\t\t=\t\tNone\r\n\t\t\t\t\t\t\t\treturn state\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef __setstate__(\t\t\t\t\t\tself\t\t\t: str\t\t,\t\t\t__snake_case\t\t\t: Dict\t\t)\t\t\t->\t\tNone:\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Tuple \t\t\t\t\t\t\t=\t\td\r\n\r\n\t\t\t\t\t\t\t\t# for backward compatibility\r\n\t\t\t\t\t\t\t\tif not hasattr(self\t\t,\t\t\t'''sp_model_kwargs'''\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : List[str] \t\t\t\t\t\t\t=\t\t{}\r\n\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Optional[Any] \t\t\t\t\t\t\t=\t\tload_spm(self.spm_file\t\t,\t\t\tself.sp_model_kwargs\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef A\t(\t\t\t\t\t\tself\t\t\t: List[Any]\t\t,\t\t\t__snake_case\t\t\t: str\t\t,\t\t\t__snake_case\t\t\t: Optional[str] = None\t\t)\t\t\t->\t\tTuple[str]:\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : List[Any] \t\t\t\t\t\t\t=\t\tPath(__snake_case\t\t)\r\n\t\t\t\t\t\t\t\tassert save_dir.is_dir(), F\"\"\"{save_directory} should be a directory\"\"\"\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Dict \t\t\t\t\t\t\t=\t\tsave_dir / (\r\n\t\t\t\t\t\t\t\t (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']\r\n\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\tUpperCAmelCase : Union[str, Any] \t\t\t\t\t\t\t=\t\tsave_dir / (\r\n\t\t\t\t\t\t\t\t (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\t\t\t\tsave_json(self.encoder\t\t,\t\t\t__snake_case\t\t)\r\n\r\n\t\t\t\t\t\t\t\tif os.path.abspath(self.spm_file\t\t) != os.path.abspath(__snake_case\t\t) and os.path.isfile(self.spm_file\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcopyfile(self.spm_file\t\t,\t\t\t__snake_case\t\t)\r\n\t\t\t\t\t\t\t\telif not os.path.isfile(self.spm_file\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twith open(__snake_case\t\t,\t\t\t'''wb'''\t\t) as fi:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase : Optional[int] 
\t\t\t\t\t\t\t=\t\tself.sp_model.serialized_model_proto()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfi.write(__snake_case\t\t)\r\n\r\n\t\t\t\t\t\t\t\treturn (str(__snake_case\t\t), str(__snake_case\t\t))\r\n\r\n\r\ndef snake_case_ (\t\t\t\t\t_lowerCAmelCase :\t\t\t\t\t\tstr , _lowerCAmelCase :\t\t\t\t\t\tDict[str, Any] ) -> sentencepiece.SentencePieceProcessor:\r\n\t\t\t\t\t\t\tUpperCAmelCase : int \t\t\t\t\t\t\t=\t\tsentencepiece.SentencePieceProcessor(**_lowerCAmelCase )\r\n\t\t\t\t\t\t\tspm.Load(str(_lowerCAmelCase ) )\r\n\t\t\t\t\t\t\treturn spm\r\n\r\n\r\ndef snake_case_ (\t\t\t\t\t_lowerCAmelCase :\t\t\t\t\t\tstr ) -> Union[Dict, List]:\r\n\t\t\t\t\t\t\twith open(_lowerCAmelCase , '''r''' ) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn json.load(_lowerCAmelCase )\r\n\r\n\r\n\r\n\r\ndef snake_case_ (\t\t\t\t\t_lowerCAmelCase :\t\t\t\t\t\tList[Any] , _lowerCAmelCase :\t\t\t\t\t\tstr ) -> None:\r\n\t\t\t\t\t\t\twith open(_lowerCAmelCase , '''w''' ) as f:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tjson.dump(_lowerCAmelCase , _lowerCAmelCase , indent=2 )\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":23,"string":"23"},"style_context":{"kind":"string","value":"\r\n\r\nfrom .imports import is_tqdm_available\r\n\r\n\r\nif is_tqdm_available():\r\n from tqdm.auto import tqdm as _tqdm\r\n\r\nfrom ..state import PartialState\r\ndef a (\t\t\tA__\t: bool = True , *A__\t: int , **A__\t: Union[str, Any] ) -> List[str]:\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n if not is_tqdm_available():\r\n raise ImportError('Accelerate\\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )\r\n _lowercase\t =False\r\n if main_process_only:\r\n _lowercase\t =PartialState().local_process_index == 0\r\n return _tqdm(*A__ , **A__ , disable=A__ )\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":205,"string":"205"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":831,"cells":{"code":{"kind":"string","value":"\r\r\r\r\r\r\"\"\"simple docstring\"\"\"\r\r\rfrom __future__ import annotations\r\r\r\r\rdef lowercase__ ( lowercase_ ) ->\t\t\t\t\t\tbool:\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r _UpperCamelCase :\t\tOptional[int] = str(lowercase_ )\r return n == n[::-1]\r\r\r\r\rdef lowercase__ ( lowercase_ = 1_000_000 ) ->\t\t\t\t\t\tUnion[str, Any]:\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r _UpperCamelCase :\t\tList[Any] = 0\r\r for i in range(1\t\t\t\t\t\t,lowercase_ ):\r if is_palindrome(lowercase_ ) and is_palindrome(bin(lowercase_ ).split(\"b\" )[1] ):\r total += i\r return total\r\r\rif __name__ == \"__main__\":\r print(solution(int(str(input().strip()))))\r"},"code_codestyle":{"kind":"number","value":369,"string":"369"},"style_context":{"kind":"string","value":"\r\r\r\r\r\r\"\"\"simple docstring\"\"\"\r\r\rimport torch\r\rfrom transformers import AutoModel\r\r\r\rclass __SCREAMING_SNAKE_CASE ( torch.nn.Module\t\t\t\t):\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r def __init__(\t\t\t\t\t\tself\t\t\t\t: Dict ,\t\t\t\t__a\t\t\t\t: Tuple=\"sayef/fsner-bert-base-uncased\"\t\t)\t\t\t\t\t\t->\t\t\tDict:\r super(__a ,\t\t\t\tself\t\t).__init__()\r\r _UpperCamelCase :\t\tOptional[Any] = AutoModel.from_pretrained(__a ,\t\t\t\treturn_dict=__a\t\t)\r _UpperCamelCase :\t\tstr = torch.nn.CosineSimilarity(3 ,\t\t\t\t1e-0_8\t\t)\r _UpperCamelCase :\t\tList[str] = torch.nn.Softmax(dim=1\t\t)\r\r def \t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t(\t\t\t\t\t\tself\t\t\t\t: int ,\t\t\t\t**__a\t\t\t\t: Tuple\t\t)\t\t\t\t\t\t->\t\t\tOptional[Any]:\r 
# Few-shot NER (FSNER) model: scores start/end positions of a query entity
# against a set of support examples.
import torch

from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find the start and end positions of the query entity within the support sets."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends


# Test for Kruskal's minimum spanning tree algorithm.
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
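# The test above can be run directly, or collected by pytest (assuming the
# graphs.minimum_spanning_tree_kruskal module from the same repository is on
# the import path; the file name below is illustrative):
#
#   python -m pytest test_min_spanning_tree_kruskal.py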
# PyTorch Lightning callbacks for seq2seq training (checkpointing, early
# stopping, and metric/generation logging).
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to"
            " this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                writer.write(f"{key}: {val:.6f}\n")

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
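# Wiring sketch (assumes a seq2seq LightningModule and an output_dir variable;
# both names are illustrative):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, "rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ],
#   )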
# Chudnovsky algorithm for computing digits of pi.
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits using the Chudnovsky series.

    Each term of the series contributes roughly 14 digits, hence the number of
    iterations below.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")


# Project Euler 21: sum of all amicable numbers below a limit.
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of `n` (divisors excluding `n` itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum every amicable number below `limit`."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(input().strip())))
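# Sanity checks for the two solutions above (a sketch; the values are the
# classic known results):
#
#   assert pi(10) == "3.14159265"
#   assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220
#   assert solution(10000) == 31626  # sum of amicable numbers below 10000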
hidden_sizes=self.hidden_sizes\t\t, depths=self.depths\t\t, hidden_act=self.hidden_act\t\t, num_labels=self.num_labels\t\t, image_size=self.image_size\t\t, )\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self\t\t, snake_case\t\t, snake_case ):\r\t\tlowercase = FlaxRegNetModel(config=snake_case )\r\t\tlowercase = model(snake_case )\r\r\t\t# Output shape (b, c, h, w)\r\t\tself.parent.assertEqual(\r\t\t result.last_hidden_state.shape\t\t, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32)\t\t, )\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self\t\t, snake_case\t\t, snake_case ):\r\t\tlowercase = self.num_labels\r\t\tlowercase = FlaxRegNetForImageClassification(config=snake_case )\r\t\tlowercase = model(snake_case )\r\t\tself.parent.assertEqual(result.logits.shape\t\t, (self.batch_size, self.num_labels) )\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tlowercase = self.prepare_config_and_inputs()\r\t\tlowercase ,\t\t\t\t\tlowercase = config_and_inputs\r\t\tlowercase = {'pixel_values': pixel_values}\r\t\treturn config, inputs_dict\r\r\r\r\r\r\r\r@require_flax\rclass A_ (\t\t\t__lowerCamelCase ,\tunittest.TestCase ):\r\r\r\r\r\t'''simple docstring'''\r\r\r\t_UpperCamelCase : Dict\t\t\t\t\t\t\t = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()\r\r\t_UpperCamelCase : str\t\t\t\t\t\t\t = False\r\t_UpperCamelCase : str\t\t\t\t\t\t\t = False\r\t_UpperCamelCase : Tuple\t\t\t\t\t\t\t = False\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tlowercase = FlaxRegNetModelTester(self )\r\t\tlowercase = ConfigTester(self\t\t, config_class=snake_case\t\t, has_text_modality=snake_case )\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tself.create_and_test_config_common_properties()\r\t\tself.config_tester.create_and_test_config_to_json_string()\r\t\tself.config_tester.create_and_test_config_to_json_file()\r\t\tself.config_tester.create_and_test_config_from_and_save_pretrained()\r\t\tself.config_tester.create_and_test_config_with_num_labels()\r\t\tself.config_tester.check_config_can_be_init_without_params()\r\t\tself.config_tester.check_config_arguments_init()\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\treturn\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tlowercase = self.model_tester.prepare_config_and_inputs()\r\t\tself.model_tester.create_and_check_model(*snake_case )\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tlowercase = self.model_tester.prepare_config_and_inputs()\r\t\tself.model_tester.create_and_check_for_image_classification(*snake_case )\r\r\r\r\r\r\t@unittest.skip(reason='RegNet does not use inputs_embeds' )\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tpass\r\r\r\r\r\r\t@unittest.skip(reason='RegNet does not support input and output embeddings' )\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tpass\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tlowercase ,\t\t\t\t\tlowercase = self.model_tester.prepare_config_and_inputs_for_common()\r\r\t\tfor model_class in self.all_model_classes:\r\t\t\tlowercase = model_class(snake_case )\r\t\t\tlowercase = inspect.signature(model.__call__ )\r\t\t\t# signature.parameters is an OrderedDict => so arg_names order is deterministic\r\t\t\tlowercase = [*signature.parameters.keys()]\r\r\t\t\tlowercase = ['pixel_values']\r\t\t\tself.assertListEqual(arg_names[:1]\t\t, snake_case )\r\r\r\r\r\r\tdef 
\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tdef check_hidden_states_output(snake_case\t\t, snake_case\t\t, snake_case ):\r\t\t\tlowercase = model_class(snake_case )\r\r\t\t\tlowercase = model(**self._prepare_for_class(snake_case\t\t, snake_case ) )\r\r\t\t\tlowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states\r\r\t\t\tlowercase = self.model_tester.num_stages\r\t\t\tself.assertEqual(len(snake_case )\t\t, expected_num_stages + 1 )\r\r\t\tlowercase ,\t\t\t\t\tlowercase = self.model_tester.prepare_config_and_inputs_for_common()\r\r\t\tfor model_class in self.all_model_classes:\r\t\t\tlowercase = True\r\t\t\tcheck_hidden_states_output(snake_case\t\t, snake_case\t\t, snake_case )\r\r\t\t\t# check that output_hidden_states also work using config\r\t\t\tdel inputs_dict[\"output_hidden_states\"]\r\t\t\tlowercase = True\r\r\t\t\tcheck_hidden_states_output(snake_case\t\t, snake_case\t\t, snake_case )\r\r\r\r\r\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tlowercase ,\t\t\t\t\tlowercase = self.model_tester.prepare_config_and_inputs_for_common()\r\r\t\tfor model_class in self.all_model_classes:\r\t\t\twith self.subTest(model_class.__name__ ):\r\t\t\t\tlowercase = self._prepare_for_class(snake_case\t\t, snake_case )\r\t\t\t\tlowercase = model_class(snake_case )\r\r\t\t\t\t@jax.jit\r\t\t\t\tdef model_jitted(snake_case\t\t, **snake_case ):\r\t\t\t\t\treturn model(pixel_values=snake_case\t\t, **snake_case )\r\r\t\t\t\twith self.subTest('JIT Enabled' ):\r\t\t\t\t\tlowercase = model_jitted(**snake_case ).to_tuple()\r\r\t\t\t\twith self.subTest('JIT Disabled' ):\r\t\t\t\t\twith jax.disable_jit():\r\t\t\t\t\t\tlowercase = model_jitted(**snake_case ).to_tuple()\r\r\t\t\t\tself.assertEqual(len(snake_case )\t\t, len(snake_case ) )\r\t\t\t\tfor jitted_output, output in zip(snake_case\t\t, snake_case ):\r\t\t\t\t\tself.assertEqual(jitted_output.shape\t\t, output.shape )\r\r\r\r\r\r\rdef \t\t\t\t\t\t\tUpperCAmelCase_\t\t(\t\t\t\t\t\t\t):\r\tlowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'\t\t\t\t\t\t\t)\r\treturn image\r\r\r\r\r\r\r\r@require_flax\rclass A_ (\t\t\tunittest.TestCase ):\r\r\r\r\r\t'''simple docstring'''\r\r\r\t@cached_property\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\treturn AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None\r\r\r\r\r\r\t@slow\r\tdef \tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t( self ):\r\t\tlowercase = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )\r\r\t\tlowercase = self.default_image_processor\r\t\tlowercase = prepare_img()\r\t\tlowercase = image_processor(images=snake_case\t\t, return_tensors='np' )\r\r\t\tlowercase = model(**snake_case )\r\r\t\t# verify the logits\r\t\tlowercase = (1, 1000)\r\t\tself.assertEqual(outputs.logits.shape\t\t, snake_case )\r\r\t\tlowercase = jnp.array([-0.4_180, -1.5_051, -3.4_836] )\r\r\t\tself.assertTrue(jnp.allclose(outputs.logits[0, :3]\t\t, snake_case\t\t, atol=1E-4 ) )\r\r"},"code_codestyle":{"kind":"number","value":195,"string":"195"},"style_context":{"kind":"string","value":"\rimport argparse\rimport os\rfrom io import BytesIO\rfrom pathlib import Path\r\rimport requests\rfrom clip_retrieval.clip_client import ClipClient\rfrom PIL import Image\rfrom tqdm import tqdm\r\r\r\r\r\r\rdef \t\t\t\t\t\t\tUpperCAmelCase_\t\t(\t\t\t\t\t\t\t__SCREAMING_SNAKE_CASE ,\t\t\t\t\t\t\t__SCREAMING_SNAKE_CASE ,\t\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t):\r\tlowercase = 1.5\r\tlowercase 
= int(factor * num_class_images\t\t\t\t\t\t\t)\r\tlowercase = ClipClient(\r\t url='https://knn.laion.ai/knn-service' ,\t\t\t\t\t\t\tindice_name='laion_400m' ,\t\t\t\t\t\t\tnum_images=__SCREAMING_SNAKE_CASE ,\t\t\t\t\t\t\taesthetic_weight=0.1\t\t\t\t\t\t\t)\r\r\tos.makedirs(F'''{class_data_dir}/images''' ,\t\t\t\t\t\t\texist_ok=__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t)\r\tif len(list(Path(F'''{class_data_dir}/images'''\t\t\t\t\t\t\t).iterdir()\t\t\t\t\t\t\t)\t\t\t\t\t\t\t) >= num_class_images:\r\t\treturn\r\r\twhile True:\r\t\tlowercase = client.query(text=__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t)\r\t\tif len(__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t) >= factor * num_class_images or num_images > 1e4:\r\t\t\tbreak\r\t\telse:\r\t\t\tlowercase = int(factor * num_images\t\t\t\t\t\t\t)\r\t\t\tlowercase = ClipClient(\r\t\t\t url='https://knn.laion.ai/knn-service' ,\t\t\t\t\t\t\tindice_name='laion_400m' ,\t\t\t\t\t\t\tnum_images=__SCREAMING_SNAKE_CASE ,\t\t\t\t\t\t\taesthetic_weight=0.1 ,\t\t\t\t\t\t\t)\r\r\tlowercase = 0\r\tlowercase = 0\r\tlowercase = tqdm(desc='downloading real regularization images' ,\t\t\t\t\t\t\ttotal=__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t)\r\r\twith open(F'''{class_data_dir}/caption.txt''' ,\t\t\t\t\t\t\t'w'\t\t\t\t\t\t\t) as fa, open(F'''{class_data_dir}/urls.txt''' ,\t\t\t\t\t\t\t'w'\t\t\t\t\t\t\t) as fa, open(\r\t F'''{class_data_dir}/images.txt''' ,\t\t\t\t\t\t\t'w'\t\t\t\t\t\t\t) as fa:\r\t\twhile total < num_class_images:\r\t\t\tlowercase = class_images[count]\r\t\t\tcount += 1\r\t\t\ttry:\r\t\t\t\tlowercase = requests.get(images['url']\t\t\t\t\t\t\t)\r\t\t\t\tif img.status_code == 200:\r\t\t\t\t\tlowercase = Image.open(BytesIO(img.content\t\t\t\t\t\t\t)\t\t\t\t\t\t\t)\r\t\t\t\t\twith open(F'''{class_data_dir}/images/{total}.jpg''' ,\t\t\t\t\t\t\t'wb'\t\t\t\t\t\t\t) as f:\r\t\t\t\t\t\tf.write(img.content\t\t\t\t\t\t\t)\r\t\t\t\t\tfa.write(images['caption'] + '\\n'\t\t\t\t\t\t\t)\r\t\t\t\t\tfa.write(images['url'] + '\\n'\t\t\t\t\t\t\t)\r\t\t\t\t\tfa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\\n'\t\t\t\t\t\t\t)\r\t\t\t\t\ttotal += 1\r\t\t\t\t\tpbar.update(1\t\t\t\t\t\t\t)\r\t\t\t\telse:\r\t\t\t\t\tcontinue\r\t\t\texcept Exception:\r\t\t\t\tcontinue\r\treturn\r\r\r\r\r\r\rdef \t\t\t\t\t\t\tUpperCAmelCase_\t\t(\t\t\t\t\t\t\t):\r\tlowercase = argparse.ArgumentParser('' ,\t\t\t\t\t\t\tadd_help=__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t)\r\tparser.add_argument('--class_prompt' ,\t\t\t\t\t\t\thelp='text prompt to retrieve images' ,\t\t\t\t\t\t\trequired=__SCREAMING_SNAKE_CASE ,\t\t\t\t\t\t\ttype=__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t)\r\tparser.add_argument('--class_data_dir' ,\t\t\t\t\t\t\thelp='path to save images' ,\t\t\t\t\t\t\trequired=__SCREAMING_SNAKE_CASE ,\t\t\t\t\t\t\ttype=__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t)\r\tparser.add_argument('--num_class_images' ,\t\t\t\t\t\t\thelp='number of images to download' ,\t\t\t\t\t\t\tdefault=200 ,\t\t\t\t\t\t\ttype=__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t)\r\treturn parser.parse_args()\r\r\rif __name__ == \"__main__\":\r\tUpperCAmelCase\t = parse_args()\r\tretrieve(args.class_prompt, args.class_data_dir, args.num_class_images)\r\r"},"style_context_codestyle":{"kind":"number","value":195,"string":"195"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":835,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nimport queue\r\n\r\nclass A__ :\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n def __init__(\tself , __snake_case ):\r\n snake_case\t\t\t\t\t= data\r\n 
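# Example invocation (prompt and directory are illustrative placeholders):
#
#   python retrieve.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200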
# Binary tree traversals: recursive and iterative variants.
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree():
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Level order traversal that prints one tree level per line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
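# Programmatic sanity check (a sketch; build_tree above is interactive):
#
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   pre_order(root)   # prints 1,2,3,
#   in_order(root)    # prints 2,1,3,
#   post_order(root)  # prints 2,3,1,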
floats_tensor\r\nfrom diffusers.utils.testing_utils import (\r\n is_onnx_available,\r\n load_image,\r\n nightly,\r\n require_onnxruntime,\r\n require_torch_gpu,\r\n)\r\n\r\nfrom ..test_pipelines_onnx_common import OnnxPipelineTesterMixin\r\n\r\n\r\nif is_onnx_available():\r\n import onnxruntime as ort\r\n\r\nclass A__ ( snake_case__\t, unittest.TestCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n __magic_name__ =\t\t'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'\r\n def \t\ta_ (\tself , __snake_case=0 ):\r\n snake_case\t\t\t\t\t= floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__snake_case ) )\r\n snake_case\t\t\t\t\t= np.random.RandomState(__snake_case )\r\n snake_case\t\t\t\t\t= {\r\n '''prompt''': '''A painting of a squirrel eating a burger''',\r\n '''image''': image,\r\n '''generator''': generator,\r\n '''num_inference_steps''': 3,\r\n '''strength''': 0.75,\r\n '''guidance_scale''': 7.5,\r\n '''output_type''': '''numpy''',\r\n }\r\n return inputs\r\n def \t\ta_ (\tself ):\r\n snake_case\t\t\t\t\t= OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )\r\n pipe.set_progress_bar_config(disable=__snake_case )\r\n\r\n snake_case\t\t\t\t\t= self.get_dummy_inputs()\r\n snake_case\t\t\t\t\t= pipe(**__snake_case ).images\r\n snake_case\t\t\t\t\t= image[0, -3:, -3:, -1].flatten()\r\n\r\n assert image.shape == (1, 1_2_8, 1_2_8, 3)\r\n snake_case\t\t\t\t\t= np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )\r\n assert np.abs(image_slice - expected_slice ).max() < 1E-1\r\n def \t\ta_ (\tself ):\r\n snake_case\t\t\t\t\t= OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )\r\n snake_case\t\t\t\t\t= PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__snake_case )\r\n pipe.set_progress_bar_config(disable=__snake_case )\r\n\r\n snake_case\t\t\t\t\t= self.get_dummy_inputs()\r\n snake_case\t\t\t\t\t= pipe(**__snake_case ).images\r\n snake_case\t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n assert image.shape == (1, 1_2_8, 1_2_8, 3)\r\n snake_case\t\t\t\t\t= np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )\r\n\r\n assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1\r\n def \t\ta_ (\tself ):\r\n snake_case\t\t\t\t\t= OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )\r\n snake_case\t\t\t\t\t= LMSDiscreteScheduler.from_config(pipe.scheduler.config )\r\n pipe.set_progress_bar_config(disable=__snake_case )\r\n\r\n # warmup pass to apply optimizations\r\n snake_case\t\t\t\t\t= pipe(**self.get_dummy_inputs() )\r\n\r\n snake_case\t\t\t\t\t= self.get_dummy_inputs()\r\n snake_case\t\t\t\t\t= pipe(**__snake_case ).images\r\n snake_case\t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n assert image.shape == (1, 1_2_8, 1_2_8, 3)\r\n snake_case\t\t\t\t\t= np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )\r\n\r\n assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1\r\n def \t\ta_ (\tself ):\r\n snake_case\t\t\t\t\t= OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )\r\n snake_case\t\t\t\t\t= EulerDiscreteScheduler.from_config(pipe.scheduler.config )\r\n pipe.set_progress_bar_config(disable=__snake_case )\r\n\r\n snake_case\t\t\t\t\t= 
self.get_dummy_inputs()\r\n snake_case\t\t\t\t\t= pipe(**__snake_case ).images\r\n snake_case\t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n assert image.shape == (1, 1_2_8, 1_2_8, 3)\r\n snake_case\t\t\t\t\t= np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )\r\n\r\n assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1\r\n def \t\ta_ (\tself ):\r\n snake_case\t\t\t\t\t= OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )\r\n snake_case\t\t\t\t\t= EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )\r\n pipe.set_progress_bar_config(disable=__snake_case )\r\n\r\n snake_case\t\t\t\t\t= self.get_dummy_inputs()\r\n snake_case\t\t\t\t\t= pipe(**__snake_case ).images\r\n snake_case\t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n assert image.shape == (1, 1_2_8, 1_2_8, 3)\r\n snake_case\t\t\t\t\t= np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )\r\n\r\n assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1\r\n\r\n\r\n\r\n\r\n def \t\ta_ (\tself ):\r\n snake_case\t\t\t\t\t= OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )\r\n snake_case\t\t\t\t\t= DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )\r\n pipe.set_progress_bar_config(disable=__snake_case )\r\n\r\n snake_case\t\t\t\t\t= self.get_dummy_inputs()\r\n snake_case\t\t\t\t\t= pipe(**__snake_case ).images\r\n snake_case\t\t\t\t\t= image[0, -3:, -3:, -1]\r\n\r\n assert image.shape == (1, 1_2_8, 1_2_8, 3)\r\n snake_case\t\t\t\t\t= np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )\r\n\r\n assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1\r\n\r\n\r\n\r\n\r\n\r\n\r\n@nightly\r\n@require_onnxruntime\r\n@require_torch_gpu\r\nclass A__ ( unittest.TestCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n @property\r\n def \t\ta_ (\tself ):\r\n return (\r\n \"CUDAExecutionProvider\",\r\n {\r\n \"gpu_mem_limit\": \"15000000000\", # 15GB\r\n \"arena_extend_strategy\": \"kSameAsRequested\",\r\n },\r\n )\r\n @property\r\n def \t\ta_ (\tself ):\r\n snake_case\t\t\t\t\t= ort.SessionOptions()\r\n snake_case\t\t\t\t\t= False\r\n return options\r\n def \t\ta_ (\tself ):\r\n snake_case\t\t\t\t\t= load_image(\r\n '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''\r\n '''/img2img/sketch-mountains-input.jpg''' )\r\n snake_case\t\t\t\t\t= init_image.resize((7_6_8, 5_1_2) )\r\n # using the PNDM scheduler by default\r\n snake_case\t\t\t\t\t= OnnxStableDiffusionImgaImgPipeline.from_pretrained(\r\n '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )\r\n pipe.set_progress_bar_config(disable=__snake_case )\r\n\r\n snake_case\t\t\t\t\t= '''A fantasy landscape, trending on artstation'''\r\n\r\n snake_case\t\t\t\t\t= np.random.RandomState(0 )\r\n snake_case\t\t\t\t\t= pipe(\r\n prompt=__snake_case , image=__snake_case , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__snake_case , output_type='''np''' , )\r\n snake_case\t\t\t\t\t= output.images\r\n snake_case\t\t\t\t\t= images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]\r\n\r\n assert images.shape == (1, 5_1_2, 7_6_8, 3)\r\n snake_case\t\t\t\t\t= np.array([0.4909, 0.5059, 0.5372, 0.4623, 
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2


# --- end of dataset row 835 (field "style_context", codestyle 213, row label 1) ---
# --- dataset row 836, field "code" (codestyle 49): transformers models/yolos/__init__.py ---

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
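# Illustration only — not the transformers implementation. `_LazyModule` above defers the
# heavy submodule imports until first attribute access; the same idea can be sketched with a
# module-level `__getattr__` (PEP 562), reusing the `_import_structure` table:

import importlib


def __getattr__(name):
    # Runs only when `name` is not already in the module namespace.
    for submodule, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(f".{submodule}", __package__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")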
# --- dataset row 836, field "style_context" (codestyle 9, row label 0): the `indic_glue` metric
#     from the datasets library ---

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score

import datasets


_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
"""

_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""

_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'precision@10': 1.0}

"""


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
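# Quick sanity check for `precision_at_10` above: with fewer than 10 candidate sentences the
# top-10 neighbourhood covers every index, so the score is trivially 1.0 — handy for
# verifying the plumbing, though not the metric itself (values below are made up):

_en = [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
_in = [[0.9, 0.1], [0.1, 0.9], [1.1, 0.9]]
assert precision_at_10(_en, _in) == 1.0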
\"wstp\",\r\t\t\t\t\t\t\t\t\t\t \"inltkh\",\r\t\t\t\t\t\t\t\t\t\t \"bbca\",\r\t\t\t\t\t\t\t\t\t\t \"iitp-mr\",\r\t\t\t\t\t\t\t\t\t\t \"iitp-pr\",\r\t\t\t\t\t\t\t\t\t\t \"actsa-sc\",\r\t\t\t\t\t\t\t\t\t\t \"md\",\r\t\t\t\t\t\t\t\t\t\t]:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn {\"accuracy\": simple_accuracy(lowerCAmelCase__\t\t\t,\t\t\tlowerCAmelCase__\t)}\r\t\t\t\t\t\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise KeyError(\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t '''You should supply a configuration name selected in '''\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t '''[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", '''\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t '''\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", '''\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t '''\"wiki-ner\"]'''\t)\r"},"style_context_codestyle":{"kind":"number","value":9,"string":"9"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":837,"cells":{"code":{"kind":"string","value":"\r\r\r\r\"\"\"simple docstring\"\"\"\r\r\r\r\r\r\rimport warnings\r\rfrom ...processing_utils import ProcessorMixin\rfrom ...tokenization_utils_base import BatchEncoding\r\rclass \t__SCREAMING_SNAKE_CASE (\t\t\t\tlowerCAmelCase_ ):\r\r\r\t\t\t\t\t'''simple docstring'''\r\r\r\r\r\r\t\t\t\t\t_a =\t\t\t\t\t\t\t['image_processor', 'tokenizer']\r\t\t\t\t\t_a =\t\t\t\t\t\t\t'CLIPImageProcessor'\r\t\t\t\t\t_a =\t\t\t\t\t\t\t('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')\r\r\r\r\t\t\t\t\tdef __init__(\tself\t\t\t\t\t\t\t: List[str], lowerCamelCase\t\t\t\t\t\t\t: Dict=None, lowerCamelCase\t\t\t\t\t\t\t: str=None, **lowerCamelCase\t\t\t\t\t\t\t: int\t\t\t\t\t)-> Tuple:\r\t\t\t\t\t\t\t\tlowerCamelCase__\t\t:\t\tList[str]\t\t =None\r\t\t\t\t\t\t\t\tif \"feature_extractor\" in kwargs:\r\t\t\t\t\t\t\t\t\t\t\twarnings.warn(\r\t\t\t\t\t\t\t\t\t\t\t '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''\r\t\t\t\t\t\t\t\t\t\t\t ''' instead.''', lowerCamelCase, )\r\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase__\t\t:\t\tOptional[Any]\t\t =kwargs.pop('''feature_extractor'''\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\tlowerCamelCase__\t\t:\t\tOptional[int]\t\t =image_processor if image_processor is not None else feature_extractor\r\t\t\t\t\t\t\t\tif image_processor is None:\r\t\t\t\t\t\t\t\t\t\t\traise ValueError('''You need to specify an `image_processor`.'''\t\t\t\t\t)\r\t\t\t\t\t\t\t\tif tokenizer is None:\r\t\t\t\t\t\t\t\t\t\t\traise ValueError('''You need to specify a `tokenizer`.'''\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\tsuper().__init__(lowerCamelCase, lowerCamelCase\t\t\t\t\t)\r\r\r\r\t\t\t\t\tdef __call__(\tself\t\t\t\t\t\t\t: Any, lowerCamelCase\t\t\t\t\t\t\t: Tuple=None, lowerCamelCase\t\t\t\t\t\t\t: Any=None, lowerCamelCase\t\t\t\t\t\t\t: Any=None, **lowerCamelCase\t\t\t\t\t\t\t: List[Any]\t\t\t\t\t)-> Optional[int]:\r\r\t\t\t\t\t\t\t\tif text is None and images is None:\r\t\t\t\t\t\t\t\t\t\t\traise ValueError('''You have to specify either text or images. 
# --- dataset row 837, field "style_context" (codestyle 272, row label 1): transformers
#     YOSO checkpoint-conversion script ---

import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    # Target keys below are reconstructed from the original conversion script.
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
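# Hypothetical invocation of the converter above (the script filename and all paths are
# placeholders, not the actual repository names):
#
#     python convert_yoso_checkpoint.py \
#         --pytorch_model_path ./yoso.ckpt \
#         --config_file ./yoso_config.json \
#         --pytorch_dump_path ./yoso-hf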
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)


# --- dataset row 838, field "code" (codestyle 152): transformers models/flava deprecation shim ---

import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


# --- dataset row 838, field "style_context" (codestyle 176, row label 0): Project Euler problem 22 ---

import os


def solution() -> int:
    """Return the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
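# Worked example from the Project Euler statement: COLIN has a letter value of
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the sorted list it contributes
# 938 * 53 = 49714 to the total:

assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714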
# --- dataset row 839, field "code" (codestyle 368): transformers
#     models/gpt_neo/configuration_gpt_neo.py ---

from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation of the block-length helper to enable the export to ONNX: it returns the
    largest divisor of `seq_length` below `window_size`, and the corresponding number of blocks.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
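# Equivalence check for `custom_unfold` above — it is meant to reproduce
# `torch.Tensor.unfold` (a sketch; assumes torch is importable at module scope):

import torch

_x = torch.arange(10).view(1, 10)
assert torch.equal(_x.unfold(dimension=1, size=4, step=2), custom_unfold(_x, 1, 4, 2))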
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13


# --- dataset row 839, field "style_context" (codestyle 279, row label 0): diffusers PNDM
#     pipeline tests ---

import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
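# Determinism note for the tests above: with `enable_full_determinism()` plus a fixed seed,
# two runs of the same pipeline are expected to agree bitwise (an assumption — it holds only
# for one device/dtype combination):
#
#     generator = torch.manual_seed(0)
#     first = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
#     generator = torch.manual_seed(0)
#     second = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
#     assert np.array_equal(first, second)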
# --- dataset row 840, field "code" (codestyle 24): transformers utils/check_config_attributes.py ---

import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}


# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)


def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
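# Two tiny self-checks for the machinery above. First, the whitelist semantics: `True`
# allows every attribute of a class, while a list allows only the named ones:

for _name, _attr in [("EncodecConfig", "overlap"), ("DPRConfig", "anything")]:
    _allowed = SPECIAL_CASES_TO_ALLOW.get(_name, [])
    assert _allowed is True or _attr in _allowed

# Second, the multi-line `getattr` regex can be spot-checked in isolation (the attribute
# name below is made up):

_snippet = 'bias = getattr(\n    self.config, "use_rel_pos_bias"\n)'
assert re.search(
    r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"use_rel_pos_bias"', _snippet
)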
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"

        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
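# Spot-check sketch (assumes a transformers source checkout, run from the repo root):
#
#     from transformers import BertConfig
#     print(check_config_attributes_being_used(BertConfig))
#     # an empty list means every __init__ parameter is referenced in the modeling files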
# --- dataset row 840, field "style_context" (codestyle 83, row label 0): transformers
#     models/bark/processing_bark.py ---

import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not"
                    " exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to"
                    " the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}

            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}`"
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
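    # Shape contract enforced by `_validate_voice_preset_dict` above: `semantic_prompt`
    # must be a 1-D ndarray, while `coarse_prompt` and `fine_prompt` must be 2-D, matching
    # the `preset_shape` table at the top of the class.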
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
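# Usage sketch for the processor above (checkpoint and preset names are the usual Bark
# examples; treat them as illustrative rather than guaranteed):
#
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#     # inputs holds padded input_ids/attention_mask plus a "history_prompt" BatchFeature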
# ==== next sample: RoBERTa configuration (with ONNX export config) ====

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
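# Quick look at the ONNX axes defined above (added sketch; uses the public
# transformers API instead of this module's relative imports, and assumes
# OnnxConfig still accepts a task name in its constructor):
from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

onnx_config = RobertaOnnxConfig(RobertaConfig(), task="multiple-choice")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'choice', 2: 'sequence'})])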
# ==== next sample: Vigenere cipher script ====

LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
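# Round-trip check (added): encrypting and then decrypting with the same key
# recovers the message; characters outside A-Z pass through unchanged.
ciphertext = encrypt_message("LIME", "Attack at dawn!")
assert ciphertext == "Lbfens mx oiir!"
assert decrypt_message("LIME", ciphertext) == "Attack at dawn!"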
# ==== next sample: TF LayoutLMv3 model tests ====

from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
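# Sanity check of the sequence-length bookkeeping used by the tester (added).
# With the microsoft/layoutlmv3-base defaults assumed here (input_size=224,
# patch_size=16), the image contributes (224 // 16) ** 2 + 1 = 197 positions,
# so the 2 text tokens in the integration test yield the asserted
# (1, 199, 768) hidden-state shape.
image_seq_length = (224 // 16) ** 2 + 1
assert image_seq_length + 2 == 199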
# ==== next sample: circle sort ====

def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""

    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
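# Quick demonstration (added): circle_sort sorts in place and returns the list;
# duplicates and trivially small inputs are handled.
data = [6, 1, 5, 2, 4, 3, 3]
assert circle_sort(data) == [1, 2, 3, 3, 4, 5, 6]
assert circle_sort([]) == []
assert circle_sort([7]) == [7]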
# ==== next sample: convert S3PRL Wav2Vec2 downstream checkpoints to Transformers ====

import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
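# Example invocation (added; all paths are illustrative placeholders, not taken
# from the original script):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./hf_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model
#
# The checkpoint must contain a "Downstream" state dict (and a "Featurizer" one
# when config.use_weighted_layer_sum is set), as read by convert_s3prl_checkpoint.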
# ==== next sample: Project Euler 97, last digits of a huge non-Mersenne prime ====

def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of the number 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
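# Why the three-argument pow matters (added note): pow(2, 7830457, modulus)
# performs modular exponentiation, so the roughly 2.36-million-digit value
# 2**7830457 is never materialized. Small-scale check of the same idiom:
assert pow(2, 100, 10**3) == 2**100 % 10**3 == 376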
# ==== next sample: HerBERT fast tokenizer ====

from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)


# ==== next sample: Donut processor test ====

import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
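# How token2json reads the sequence above (added note): every JSON key becomes a
# <s_key>...</s_key> tag pair, nesting mirrors the JSON nesting, and sibling
# list items are separated by <sep/>. token2json simply inverts that encoding,
# which is what the assertDictEqual check verifies.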
# ==== next sample: TF MBart model tests ====

from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
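# Translation usage sketch (added): the integration test above reduced to its
# essentials; it downloads the facebook/mbart-large-en-ro weights.
#
#   from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
#   batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
#   ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
#   print(tokenizer.batch_decode(ids, skip_special_tokens=True))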
"},"code_codestyle":{"kind":"number","value":53,"string":"53"},"style_context":{"kind":"string","value":"
"""simple docstring"""

# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

_UpperCamelCase: Any = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
"},"style_context_codestyle":{"kind":"number","value":53,"string":"53"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":846,"cells":{"code":{"kind":"string","value":"
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

a__: Union[str, Any] = 16
a__: Union[str, Any] = 32


def UpperCamelCase__(UpperCamelCase__: Accelerator, UpperCamelCase__: int = 16, UpperCamelCase__: str = "bert-base-cased") -> Tuple:
    A__ = AutoTokenizer.from_pretrained(UpperCamelCase__)
    A__ = load_dataset("glue", "mrpc")

    def tokenize_function(UpperCamelCase__: Union[str, Any]):
        # max_length=None => use the model max length (it's actually the default)
        A__ = tokenizer(examples["sentence1"], examples["sentence2"], truncation=UpperCamelCase__, max_length=UpperCamelCase__)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    A__ = datasets.map(
        UpperCamelCase__, batched=UpperCamelCase__, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=UpperCamelCase__)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    A__ = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(UpperCamelCase__: Dict):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(UpperCamelCase__, padding="max_length", max_length=1_28, return_tensors="pt")
        return tokenizer.pad(UpperCamelCase__, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    A__ = DataLoader(
        tokenized_datasets["train"], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__)
    A__ = DataLoader(
        tokenized_datasets["validation"], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__)

    return train_dataloader, eval_dataloader


def UpperCamelCase__(UpperCamelCase__: List[str], UpperCamelCase__: int) -> Dict:
    # Initialize accelerator
    A__ = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    A__ = config["lr"]
    A__ = int(config["num_epochs"])
    A__ = int(config["seed"])
    A__ = int(config["batch_size"])
    A__ = args.model_name_or_path

    set_seed(UpperCamelCase__)
    A__, A__ = get_dataloaders(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    A__ = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__, return_dict=UpperCamelCase__)

    # Instantiate optimizer
    A__ = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    A__ = optimizer_cls(params=model.parameters(), lr=UpperCamelCase__)

    if accelerator.state.deepspeed_plugin is not None:
        A__ = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        A__ = 1
    A__ = (len(UpperCamelCase__) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        A__ = get_linear_schedule_with_warmup(
            optimizer=UpperCamelCase__, num_warmup_steps=0, num_training_steps=UpperCamelCase__,)
    else:
        A__ = DummyScheduler(UpperCamelCase__, total_num_steps=UpperCamelCase__, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    A__, A__, A__, A__, A__ = accelerator.prepare(
        UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)

    # We need to keep track of how many total steps we have iterated over
    A__ = 0
    # We also need to keep track of the stating epoch so files are named properly
    A__ = 0

    # Now we train the model
    A__ = evaluate.load("glue", "mrpc")
    A__ = 0
    A__ = {}
    for epoch in range(UpperCamelCase__, UpperCamelCase__):
        model.train()
        for step, batch in enumerate(UpperCamelCase__):
            A__ = model(**UpperCamelCase__)
            A__ = outputs.loss
            A__ = loss / gradient_accumulation_steps
            accelerator.backward(UpperCamelCase__)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        A__ = 0
        for step, batch in enumerate(UpperCamelCase__):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                A__ = model(**UpperCamelCase__)
            A__ = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            A__, A__ = accelerator.gather(
                (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(UpperCamelCase__) - 1:
                    A__ = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    A__ = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=UpperCamelCase__, references=UpperCamelCase__,)

        A__ = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", UpperCamelCase__)
        A__ = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            A__ = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(UpperCamelCase__, UpperCamelCase__)


def UpperCamelCase__() -> Optional[int]:
    A__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=UpperCamelCase__,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=UpperCamelCase__,
    )
    parser.add_argument(
        "--output_dir",
        type=UpperCamelCase__,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=UpperCamelCase__,
        default=UpperCamelCase__,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=UpperCamelCase__,
        default=3,
        help="Number of train epochs.",
    )
    A__ = parser.parse_args()
    A__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(UpperCamelCase__, UpperCamelCase__)


if __name__ == "__main__":
    main()
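# --- Editor's note (not part of the original script) --------------------------
# Given the argparse flags defined above, a plausible way to run this with a
# DeepSpeed-enabled accelerate config would be:
#
#     accelerate launch --config_file deepspeed_config.yaml this_script.py \
#         --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out
#
# The config-file name and output path are illustrative assumptions; the CLI
# flags themselves come from the argparse definitions above.
# ------------------------------------------------------------------------------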
"},"code_codestyle":{"kind":"number","value":193,"string":"193"},"style_context":{"kind":"string","value":"
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

a__: Union[str, Any] = logging.get_logger(__name__)

a__: Union[str, Any] = {"vocab_file": "spiece.model"}

a__: Tuple = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

a__: Any = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}


class SCREAMING_SNAKE_CASE__(UpperCamelCase__):
    __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
    __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
    __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
    __SCREAMING_SNAKE_CASE = []

    # NOTE: the four angle-bracket defaults below were stripped to "" in the dump
    # (HTML sanitization); they are restored here from the standard BigBird
    # tokenizer signature.
    def __init__(self, __lowerCamelCase, __lowerCamelCase="<unk>", __lowerCamelCase="<s>", __lowerCamelCase="</s>", __lowerCamelCase="<pad>", __lowerCamelCase="[SEP]", __lowerCamelCase="[MASK]", __lowerCamelCase="[CLS]", __lowerCamelCase=None, **__lowerCamelCase,):
        A__ = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase, __lowerCamelCase) else bos_token
        A__ = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase, __lowerCamelCase) else eos_token
        A__ = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase, __lowerCamelCase) else unk_token
        A__ = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase, __lowerCamelCase) else pad_token
        A__ = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase, __lowerCamelCase) else cls_token
        A__ = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase, __lowerCamelCase) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        A__ = AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase, __lowerCamelCase) else mask_token

        A__ = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=__lowerCamelCase, eos_token=__lowerCamelCase, unk_token=__lowerCamelCase, pad_token=__lowerCamelCase, sep_token=__lowerCamelCase, mask_token=__lowerCamelCase, cls_token=__lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **__lowerCamelCase,)

        A__ = vocab_file

        A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(__lowerCamelCase)

    @property
    def UpperCamelCase(self):
        return self.sp_model.get_piece_size()

    def UpperCamelCase(self):
        A__ = {self.convert_ids_to_tokens(__lowerCamelCase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        A__ = self.__dict__.copy()
        A__ = None
        return state

    def __setstate__(self, __lowerCamelCase):
        A__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            A__ = {}

        A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def UpperCamelCase(self, __lowerCamelCase):
        return self.sp_model.encode(__lowerCamelCase, out_type=__lowerCamelCase)

    def UpperCamelCase(self, __lowerCamelCase):
        return self.sp_model.piece_to_id(__lowerCamelCase)

    def UpperCamelCase(self, __lowerCamelCase):
        A__ = self.sp_model.IdToPiece(__lowerCamelCase)
        return token

    def UpperCamelCase(self, __lowerCamelCase):
        A__ = []
        A__ = ""
        A__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__lowerCamelCase) + token
                A__ = True
                A__ = []
            else:
                current_sub_tokens.append(__lowerCamelCase)
                A__ = False
        out_string += self.sp_model.decode(__lowerCamelCase)
        return out_string.strip()

    def UpperCamelCase(self, __lowerCamelCase, __lowerCamelCase = False, __lowerCamelCase = None, __lowerCamelCase = True, **__lowerCamelCase,):
        A__ = kwargs.pop("use_source_tokenizer", __lowerCamelCase)

        A__ = self.convert_ids_to_tokens(__lowerCamelCase, skip_special_tokens=__lowerCamelCase)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        A__ = []
        A__ = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase))
                    A__ = []
                sub_texts.append(__lowerCamelCase)
            else:
                current_sub_text.append(__lowerCamelCase)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(__lowerCamelCase))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            A__ = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(__lowerCamelCase))
        else:
            A__ = "".join(__lowerCamelCase)

        A__ = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            A__ = self.clean_up_tokenization(__lowerCamelCase)
            return clean_text
        else:
            return text

    def UpperCamelCase(self, __lowerCamelCase, __lowerCamelCase = None):
        if not os.path.isdir(__lowerCamelCase):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        A__ = os.path.join(
            __lowerCamelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, __lowerCamelCase)
        elif not os.path.isfile(self.vocab_file):
            with open(__lowerCamelCase, "wb") as fi:
                A__ = self.sp_model.serialized_model_proto()
                fi.write(__lowerCamelCase)

        return (out_vocab_file,)

    def UpperCamelCase(self, __lowerCamelCase, __lowerCamelCase = None):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A__ = [self.cls_token_id]
        A__ = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep

    def UpperCamelCase(self, __lowerCamelCase, __lowerCamelCase = None, __lowerCamelCase = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCamelCase, token_ids_a=__lowerCamelCase, already_has_special_tokens=__lowerCamelCase)

        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCamelCase)) + [1]
        return [1] + ([0] * len(__lowerCamelCase)) + [1] + ([0] * len(__lowerCamelCase)) + [1]

    def UpperCamelCase(self, __lowerCamelCase, __lowerCamelCase = None):
        A__ = [self.sep_token_id]
        A__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
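# --- Editor's sketch (not part of the original module) ------------------------
# Assuming the class above is the (obfuscated) BigBird sentencepiece tokenizer,
# a typical round trip would look like:
#
#     tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     ids = tok("The quick brown fox.")["input_ids"]
#     text = tok.decode(ids, skip_special_tokens=True)
#
# The checkpoint name is one of those listed in the pretrained maps above.
# ------------------------------------------------------------------------------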
"},"style_context_codestyle":{"kind":"number","value":193,"string":"193"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":847,"cells":{"code":{"kind":"string","value":"
"""simple docstring"""

import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline

if __name__ == "__main__":
    __snake_case = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    __snake_case = parser.parse_args()

    __snake_case = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    __snake_case = CLIPImageProcessor()
    __snake_case = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    __snake_case = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )

    imgaimg.save_pretrained(args.dump_path)
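# --- Editor's note (not part of the original conversion script) ---------------
# Given the argparse flags defined above, the script would plausibly be run as:
#
#     python convert_script.py --dump_path ./unclip-image-variation \
#         --txt2img_unclip kakaobrain/karlo-v1-alpha
#
# The script filename and dump path are illustrative assumptions.
# ------------------------------------------------------------------------------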
"},"code_codestyle":{"kind":"number","value":371,"string":"371"},"style_context":{"kind":"string","value":"
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

__snake_case = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    __snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"},"style_context_codestyle":{"kind":"number","value":169,"string":"169"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":848,"cells":{"code":{"kind":"string","value":"
from __future__ import annotations

from fractions import Fraction


def UpperCAmelCase__(_A: int, _A: int):
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def UpperCAmelCase__(_A: int):
    '''simple docstring'''
    a__ = []
    a__ = 11
    a__ = int("1" + "0" * digit_len)
    for num in range(lowerCAmelCase_, lowerCAmelCase_):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(lowerCAmelCase_, lowerCAmelCase_):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        a__ = 10
    return solutions


def UpperCAmelCase__(_A: int = 2):
    '''simple docstring'''
    a__ = 1.0
    for fraction in fraction_list(lowerCAmelCase_):
        a__ = Fraction(lowerCAmelCase_)
        result *= frac.denominator / frac.numerator
    return int(lowerCAmelCase_)


if __name__ == "__main__":
    print(solution())
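# --- Editor's note (not part of the original solution) ------------------------
# Modulo the placeholder names, this is the classic Project Euler 33
# "digit-cancelling fractions" solution: the four non-trivial fractions are
# 16/64, 19/95, 26/65 and 49/98, whose product is 1/100, so a deobfuscated
# solution() would be expected to return 100:
#
#     assert solution() == 100
# ------------------------------------------------------------------------------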
"},"code_codestyle":{"kind":"number","value":188,"string":"188"},"style_context":{"kind":"string","value":"
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool

if is_vision_available():
    from PIL import Image


class lowerCAmelCase(__a):
    '''simple docstring'''

    _A: List[str] = "naver-clova-ix/donut-base-finetuned-docvqa"
    _A: Any = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    _A: Tuple = "document_qa"
    _A: Dict = AutoProcessor
    _A: Tuple = VisionEncoderDecoderModel

    _A: Optional[int] = ["image", "text"]
    _A: Optional[int] = ["text"]

    def __init__(self: Any, *__a: List[str], **__a: Any) -> Optional[Any]:
        """simple docstring"""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*__a, **__a)

    def lowerCAmelCase(self: List[Any], __a: "Image", __a: str) -> List[str]:
        """simple docstring"""
        # NOTE: the Donut task-prompt markup below was stripped to "{user_input}"
        # in the dump (HTML sanitization); it is restored from the standard
        # Donut DocVQA prompt.
        __lowercase: int = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        __lowercase: str = task_prompt.replace("{user_input}", __a)
        __lowercase: Union[str, Any] = self.pre_processor.tokenizer(
            __a, add_special_tokens=__a, return_tensors="pt").input_ids
        __lowercase: int = self.pre_processor(__a, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def lowerCAmelCase(self: Optional[int], __a: int) -> int:
        """simple docstring"""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=__a,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=__a,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=__a,
        ).sequences

    def lowerCAmelCase(self: Union[str, Any], __a: Union[str, Any]) -> Union[str, Any]:
        """simple docstring"""
        __lowercase: Tuple = self.pre_processor.batch_decode(__a)[0]
        __lowercase: int = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        __lowercase: Union[str, Any] = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        __lowercase: Optional[Any] = re.sub(r"<.*?>", "", __a, count=1).strip()  # remove first task start token
        __lowercase: Dict = self.pre_processor.token2json(__a)

        return sequence["answer"]
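# --- Editor's sketch (not part of the original tool) ---------------------------
# Assuming the class above is the (obfuscated) DocumentQuestionAnsweringTool,
# a plausible invocation, with a hypothetical local image, would be:
#
#     from PIL import Image
#     tool = DocumentQuestionAnsweringTool()
#     answer = tool(document=Image.open("invoice.png"), question="What is the total?")
#
# "invoice.png" and the question are illustrative assumptions.
# -------------------------------------------------------------------------------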
"},"style_context_codestyle":{"kind":"number","value":233,"string":"233"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":849,"cells":{"code":{"kind":"string","value":"
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")


@dataclass(frozen=__lowercase, slots=__lowercase)
class __magic_name__(Generic[KEY, VAL]):
    lowerCamelCase__ = 42
    lowerCamelCase__ = 42


class __magic_name__(_Item):
    def __init__(self) -> None:
        super().__init__(_a, _a)

    def __bool__(self) -> bool:
        return False


lowerCamelCase__ = _DeletedItem()


class __magic_name__(MutableMapping[KEY, VAL]):
    def __init__(self, _a = 8, _a = 0.75) -> None:
        lowerCAmelCase_ = initial_block_size
        lowerCAmelCase_ = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        lowerCAmelCase_ = capacity_factor
        lowerCAmelCase_ = 0

    def __a(self, _a) -> int:
        return hash(_a) % len(self._buckets)

    def __a(self, _a) -> int:
        return (ind + 1) % len(self._buckets)

    def __a(self, _a, _a, _a) -> bool:
        lowerCAmelCase_ = self._buckets[ind]
        if not stored:
            lowerCAmelCase_ = _Item(_a, _a)
            self._len += 1
            return True
        elif stored.key == key:
            lowerCAmelCase_ = _Item(_a, _a)
            return True
        else:
            return False

    def __a(self) -> bool:
        lowerCAmelCase_ = len(self._buckets) * self._capacity_factor
        return len(self) >= int(_a)

    def __a(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        lowerCAmelCase_ = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def __a(self, _a) -> None:
        lowerCAmelCase_ = self._buckets
        lowerCAmelCase_ = [None] * new_size
        lowerCAmelCase_ = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def __a(self) -> None:
        self._resize(len(self._buckets) * 2)

    def __a(self) -> None:
        self._resize(len(self._buckets) // 2)

    def __a(self, _a) -> Iterator[int]:
        lowerCAmelCase_ = self._get_bucket_index(_a)
        for _ in range(len(self._buckets)):
            yield ind
            lowerCAmelCase_ = self._get_next_ind(_a)

    def __a(self, _a, _a) -> None:
        for ind in self._iterate_buckets(_a):
            if self._try_set(_a, _a, _a):
                break

    def __setitem__(self, _a, _a) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(_a, _a)

    def __delitem__(self, _a) -> None:
        for ind in self._iterate_buckets(_a):
            lowerCAmelCase_ = self._buckets[ind]
            if item is None:
                raise KeyError(_a)
            if item is _deleted:
                continue
            if item.key == key:
                lowerCAmelCase_ = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, _a) -> VAL:
        for ind in self._iterate_buckets(_a):
            lowerCAmelCase_ = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(_a)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        lowerCAmelCase_ = " ,".join(
            f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
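# --- Editor's sketch (not part of the original module) -------------------------
# With the placeholders resolved (the third class being an open-addressing
# HashMap with tombstone deletion), usage follows the MutableMapping API:
#
#     hm = HashMap()
#     hm["a"] = 1
#     hm["b"] = 2
#     del hm["a"]
#     assert "b" in hm and len(hm) == 1
# -------------------------------------------------------------------------------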
"},"code_codestyle":{"kind":"number","value":22,"string":"22"},"style_context":{"kind":"string","value":"
import math
from collections.abc import Iterator
from itertools import takewhile


def A(__a: int):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(__a) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def A():
    lowerCAmelCase_ = 2
    while True:
        if is_prime(__a):
            yield num
        num += 1


def A(__a: int = 200_0000):
    return sum(takewhile(lambda __a: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
"},"style_context_codestyle":{"kind":"number","value":22,"string":"22"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":850,"cells":{"code":{"kind":"string","value":"
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=lowerCamelCase__)
class SCREAMING_SNAKE_CASE(lowerCamelCase__):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    __lowerCamelCase: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    __lowerCamelCase: ClassVar[Features] = Features({"text": Value("string")})
    __lowerCamelCase: ClassVar[Features] = Features({"labels": ClassLabel})
    __lowerCamelCase: str = "text"
    __lowerCamelCase: str = "labels"

    def UpperCamelCase_(self: Any, __lowercase: str):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], __lowercase):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        __a = copy.deepcopy(self)
        __a = self.label_schema.copy()
        __a = features[self.label_column]
        __a = label_schema
        return task_template

    @property
    def UpperCamelCase_(self: Any):
        '''simple docstring'''
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
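# --- Editor's sketch (not part of the original template) -----------------------
# Assuming this is the (obfuscated) datasets TextClassification task template,
# aligning it with a dataset's features would look roughly like:
#
#     from datasets import ClassLabel, Features, Value
#     feats = Features({"text": Value("string"),
#                       "labels": ClassLabel(names=["neg", "pos"])})
#     template = TextClassification(text_column="text", label_column="labels")
#     template = template.align_with_features(feats)  # method name assumed
# -------------------------------------------------------------------------------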
"},"code_codestyle":{"kind":"number","value":302,"string":"302"},"style_context":{"kind":"string","value":"
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

lowerCamelCase__ = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
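# --- Editor's note (not part of the original module) ---------------------------
# The _LazyModule pattern above lets end users import as if everything were
# eager; under the usual transformers layout, something like
#
#     from transformers import ElectraModel, ElectraTokenizerFast
#
# only triggers the heavy torch/tokenizers imports at attribute-access time.
# -------------------------------------------------------------------------------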
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "é", ".",
            ],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".",
            ],
        )
        # fmt: on

    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114],  # noqa: E501
                [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114] + [0] * 63,  # noqa: E501
                [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114] + [0] * 84,
            ],
            "attention_mask": [[1] * 94, [1] * 31 + [0] * 63, [1] * 10 + [0] * 84],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )


# === Next sample: common test helpers for transformers Tools ===

from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
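
# Illustrative only: a hypothetical Tool wired up to the mixin above. The class
# and attribute names below are assumptions for this sketch (the real Tool base
# also carries e.g. `default_checkpoint`); adjust the import to your installed
# transformers version.
#
#     from transformers.tools.base import Tool
#
#     class UpperCaseTool(Tool):
#         description = "This is a tool that upper-cases a text."
#         inputs = ["text"]
#         outputs = ["text"]
#
#         def __call__(self, text):
#             return text.upper()
#
#     @is_tool_test
#     class UpperCaseToolTester(unittest.TestCase, ToolTesterMixin):
#         def setUp(self):
#             self.tool = UpperCaseTool()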

# === Next sample: ChineseCLIP image processor tests ===

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepares a batch of random images as PIL images, numpy arrays or PyTorch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

# === Next sample: BridgeTower image processor tests ===

import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when feeding images to the BridgeTower image processor,
        assuming do_resize is set to True with a scalar shortest_edge and a size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
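
# Worked example for get_expected_values above (numbers illustrative): with
# size={"shortest_edge": 288} and size_divisor=32, a 640x480 PIL image gives
#   scale = 288 / min(640, 480) = 0.6       -> (newh, neww) = (288, 384)
#   max_size = int(1333 / 800 * 288) = 479  -> max(288, 384) <= 479, no second rescale
#   floor to multiples of 32                -> (288, 384) stays unchanged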

# === Next sample: lazy import structure for transformers.models.clipseg ===

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


# === Next sample: closest pair of points (divide and conquer) ===


def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: O(n^2) scan over the given slice of points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the strip around the dividing line; only a constant
    number of neighbours needs to be checked for each point."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
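
# Illustrative sanity check (not part of the original module): the divide-and-
# conquer result should agree with a direct O(n^2) scan on random input.
#
#     import random
#
#     pts = [(random.uniform(0, 100), random.uniform(0, 100)) for _ in range(200)]
#     brute = min(
#         euclidean_distance_sqr(p, q) for i, p in enumerate(pts) for q in pts[i + 1 :]
#     ) ** 0.5
#     assert abs(closest_pair_of_points(pts, len(pts)) - brute) < 1e-9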

# === Next sample: Project Euler-style arithmetic progression counter ===


def solution(limit: int = 1000000) -> int:
    """
    Count how many n below the limit have exactly ten solutions of
    x^2 - y^2 - z^2 = n with x, y, z consecutive terms of a decreasing
    arithmetic progression.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = frequency.count(10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")


# === Next sample: TensorFlow benchmarking entry point ===

from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
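
# Example invocation, assuming the file is saved as run_benchmark_tf.py; the
# flag names come from TensorFlowBenchmarkArguments and may vary across
# transformers versions:
#
#     python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128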

# === Next sample: MobileViTV2 model tests ===

import inspect
import unittest

from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # e.g. width_multiplier=0.25 -> make_divisible(128.0, divisor=8) == 128
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)

# --- HANS dataset utilities (adversarial NLI examples for transformers) ---

import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")

                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """Standard three MNLI labels; HANS groups `contradiction` and `neutral` into `non-entailment`."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Converts a list of ``InputExample`` into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
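
# Usage sketch for the classes above. It assumes the HANS evaluation files
# ("heuristics_train_set.txt" / "heuristics_evaluation_set.txt") were already
# downloaded into `data_dir`; the tokenizer checkpoint is only an example.
def _hans_demo(data_dir: str = "./hans_data") -> None:
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    dataset = HansDataset(data_dir, tokenizer, task="hans", max_seq_length=128)
    print(len(dataset), dataset.get_labels())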

# --- ViT MSN model configuration ---

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
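
# Usage sketch: build a randomly initialised backbone from the config above.
# ViTMSNModel is the matching model class exposed by transformers.
def _vit_msn_demo() -> None:
    import torch

    from transformers import ViTMSNConfig, ViTMSNModel

    config = ViTMSNConfig(image_size=224, patch_size=16)
    model = ViTMSNModel(config)
    pixel_values = torch.randn(1, 3, 224, 224)
    outputs = model(pixel_values)
    print(outputs.last_hidden_state.shape)  # (1, 197, 768): 196 patches + CLS token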

# --- seq2seq fine-tuning script (PyTorch Lightning) ---

import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader

from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump one batch in readable and tokenized form."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )

        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total target sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total target sequence length for validation. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total target sequence length for testing. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="summarization or translation"
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs."
                " So val_check_interval will affect it."
            ),
        )
        return parser


class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)

    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())

    args = parser.parse_args()

    main(args)
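
# A typical command-line invocation of this script. The checkpoint name and
# paths are placeholders, and some flags (e.g. --do_predict, --gpus,
# --data_dir) come from add_generic_args/BaseTransformer in lightning_base,
# which is not shown here, so treat this as an illustrative sketch:
#
#   python finetune.py \
#       --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#       --data_dir ./cnn_dm \
#       --output_dir ./ft_output \
#       --gpus 1 --do_predict \
#       --max_source_length 1024 --max_target_length 56
#
# where data_dir is expected to hold train/val/test .source and .target files
# in the layout Seq2SeqDataset reads.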
# === Bilateral filter: edge-preserving smoothing (NumPy + OpenCV) ===

import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the Gaussian function element-wise to a matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a Gaussian kernel of the given dimension from pixel distances.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            img2[i, j] = np.sum(vals) / np.sum(weights)
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
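# A small self-contained check of the kernel construction above (illustrative
# values, not part of the original script; assumes the definitions above are
# in scope): the centre of the spatial kernel must carry the largest weight.
_kernel = get_gauss_kernel(kernel_size=5, spatial_variance=1.0)
assert _kernel.shape == (5, 5)
assert _kernel[2, 2] == _kernel.max()  # the centre pixel is at distance 0 from itself

# Smoothing a synthetic noisy image, no GUI needed:
_noisy = np.clip(np.eye(16) + 0.1 * np.random.rand(16, 16), 0.0, 1.0).astype("float32")
_smoothed = bilateral_filter(_noisy, spatial_variance=1.0, intensity_variance=1.0, kernel_size=5)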
# === Convolutional neural network trained with backpropagation (NumPy only) ===

import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, number_of_kernels, conv_step]
        :param size_p1:   pooling size
        :param bp_num1:   units in the BP input layer
        :param bp_num2:   units in the BP hidden layer
        :param bp_num3:   units in the BP output layer
        :param rate_w:    learning rate for the weights
        :param rate_t:    learning rate for the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save all model parameters as a dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slices of the original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, saved as a list of matrices
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expand the data slices to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expand a three-dimensional data set to one dimension
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        # expand a matrix to one dimension
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        return data_mat.reshape(1, shapes[0] * shapes[1])

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # calculate the gradient of the convolution layer from the pooling layer's gradient
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e: bool = True):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient ---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process ---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # accumulate the sum error over all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ', data_teach)
                # print('   ----BP_output  ', bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f"  - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the image data after the convolution process so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)

        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
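# Hypothetical smoke test for the CNN class above (not part of the original
# file; the dimensions were chosen to fit the network's constraints): one 3x3
# kernel with stride 1 on 6x6 inputs gives a 4x4 feature map ((6-3)/1 + 1 = 4),
# which a 2x2 pooling reduces to 2x2, i.e. a 4-dim BP input mapped to 2 outputs.
import numpy as _np

_np.random.seed(0)
_net = CNN(conv1_get=[3, 1, 1], size_p1=2, bp_num1=4, bp_num2=6, bp_num3=2, rate_w=0.2, rate_t=0.2)
_images = [_np.random.rand(6, 6) for _ in range(4)]
_labels = [_np.array([1.0, 0.0]) for _ in range(4)]
_mse = _net.train(
    patterns=4, datas_train=_images, datas_teach=_labels, n_repeat=5, error_accuracy=0.05, draw_e=False
)
print("final mse:", _mse)
print(_net.predict(_images[:1]))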
# === code_eval: the HumanEval pass@k evaluation harness (datasets metric) ===

import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = """\
@misc{chen2021evaluating,
  title={Evaluating Large Language Models Trained on Code},
  author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan and Henrique Ponde de Oliveira Pinto
          and Jared Kaplan and Harri Edwards and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray
          and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf and Girish Sastry
          and Pamela Mishkin and Brooke Chan and Scott Gray and Nick Ryder and Mikhail Pavlov and Alethea Power
          and Lukasz Kaiser and Mohammad Bavarian and Clemens Winter and Philippe Tillet and Felipe Petroski Such
          and Dave Cummings and Matthias Plappert and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss
          and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak and Jie Tang and Igor Babuschkin
          and Suchir Balaji and Shantanu Jain and William Saunders and Christopher Hesse and Andrew N. Carr
          and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa and Alec Radford and Matthew Knight
          and Miles Brundage and Mira Murati and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei
          and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
  year={2021},
  eprint={2107.03374},
  archivePrefix={arXiv},
  primaryClass={cs.LG}
}
"""

_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
"""


_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time (in seconds) each candidate program may run (Default: 3.0).
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""


_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################\
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {
            f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()
        }

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array(
        [estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)]
    )
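# Worked example for the unbiased pass@k estimator above (illustrative numbers):
# with n=5 samples and c=2 correct, pass@1 = 1 - C(3,1)/C(5,1) = 2/5 = 0.4.
print(estimate_pass_at_k(num_samples=[5], num_correct=[2], k=1))  # -> [0.4]
# With k=4, any draw of 4 out of 5 samples must contain a correct one
# (n - c = 3 < k), so the estimator short-circuits to 1.0.
print(estimate_pass_at_k(num_samples=[5], num_correct=[2], k=4))  # -> [1.]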
# === Fast polynomial multiplication via a radix-2 FFT ===

import mpmath  # for roots of unity
import numpy as np


class FFT:
    """Fast polynomial multiplication using the radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root of unity used for the Fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root ** next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverse_c[i][j] + inverse_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverse_c[i][j] - inverse_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))

        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
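# Quick sanity check of the FFT multiplier above (illustrative input; assumes
# the FFT class above is in scope): (1 + 2x + 3x^2) * (4 + 5x) should give
# 4 + 13x + 22x^2 + 15x^3. Coefficients are listed lowest power first.
_fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
print(_fft.product)  # -> [(4+0j), (13+0j), (22+0j), (15+0j)]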
# === GroupViT: convert an original checkpoint to the Transformers format ===

import argparse

import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name


def convert_state_dict(orig_state_dict, config):
    # The target key paths below follow the mapping implemented in rename_key above.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of the vision encoder's
            # attention layers require special treatment: split them into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of the text encoder's
            # attention layers require special treatment: split them into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the model's weights into the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
    )

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub
    )
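# Illustrative check of the key mapping above (example key names, not taken
# from a real checkpoint; assumes rename_key above is in scope):
print(rename_key("img_encoder.patch_embed.proj.weight"))
# -> vision_model.embeddings.patch_embeddings.projection.weight
print(rename_key("text_encoder.transformer.resblocks.0.ln_1.bias"))
# -> text_model.encoder.layers.0.layer_norm1.bias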
# === First- and second-order Shannon entropy of a text ===

from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """
    Print the first- and second-order Shannon entropies of the text
    (in bits per character), along with their difference.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each two-character sequence calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text input into two dicts of counts.
    The first dictionary stores the frequency of single-character strings.
    The second dictionary stores the frequency of two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have a space at the start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )

    # calculate_prob(text)


if __name__ == "__main__":
    main()
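# Worked example for the first-order entropy above (hypothetical input; assumes
# the definitions above are in scope). For the text "aab ", analyze_text counts
# {'a': 2, 'b': 1, ' ': 1}, so
#   H1 = -(2/4*log2(2/4) + 1/4*log2(1/4) + 1/4*log2(1/4)) = 1.5 bits per character.
_single, _double = analyze_text("aab ")
assert _single["a"] == 2 and _single["b"] == 1 and _single[" "] == 1
calculate_prob("aab ")  # prints the rounded first- and second-order entropies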
# === Launching training functions from a notebook (Accelerate) ===

import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher). Note "127.0.01" is IPv4 shorthand
            # for 127.0.0.1.
            with patch_environment(
                world_size=num_processes,
                master_addr="127.0.01",
                master_port=use_port,
                mixed_precision=mixed_precision,
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked "
                            "subprocess. This likely stems from an outside import causing issues once the "
                            "`notebook_launcher()` is called. Please review your imports and test them when "
                            "running the `notebook_launcher()` to identify which one is problematic."
                        ) from e

        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
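# Hypothetical notebook usage of the launcher above (a sketch, not part of the
# module). With num_processes=1 on a plain machine this falls through to the
# single-process branch and runs the function in-place on CPU/GPU/MPS; with
# num_processes>1 it forks one process per GPU.
#
#     from accelerate import Accelerator, notebook_launcher
#
#     def training_loop(lr):
#         accelerator = Accelerator()
#         accelerator.print(f"rank {accelerator.process_index}: lr = {lr}")
#
#     notebook_launcher(training_loop, args=(3e-4,), num_processes=1)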
# ===== open-addressing hash map with linear probing (standalone module) =====
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Sentinel left in a bucket after deletion (a tombstone)."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add or overwrite an item at the given bucket; return success."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
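# Usage sketch (hypothetical demo of the dict-like API above):
def _demo_hash_map() -> None:
    hm = HashMap(initial_block_size=8)
    hm["apple"] = 1
    hm["banana"] = 2
    hm["apple"] = 3  # overwriting an existing key keeps the length at 2
    assert len(hm) == 2 and hm["apple"] == 3
    del hm["banana"]  # leaves a `_deleted` tombstone, may trigger a size-down
    assert "banana" not in hm  # __contains__ is inherited from MutableMapping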
# ===== duplicate detection helper (standalone module) =====
from __future__ import annotations


def contains_unique_elements(collection: list[int]) -> bool:
    """Return True if all elements of the collection are distinct."""
    return len(set(collection)) == len(collection)


if __name__ == "__main__":
    import doctest

    doctest.testmod()


# ===== LU (lower-upper) decomposition (standalone module) =====
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle decomposition of a square matrix into L and U (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
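# Usage sketch (hypothetical numbers): L @ U reproduces the input matrix.
def _demo_lu() -> None:
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)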
# ===== Kandinsky 2.2 img2img pipeline tests (diffusers test module) =====
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = ["image_embeds", "negative_image_embeds", "image"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        return UNet2DConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)

        return {"unet": unet, "scheduler": scheduler, "movq": movq}

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor(
            (1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)
        ).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
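# Usage sketch (not part of the test file): the integration test above encodes
# the typical img2img flow; as a standalone sketch it needs a CUDA GPU, model
# downloads, and a PIL `init_image` you supply yourself.
#
#     prior = KandinskyV22PriorPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#     ).to("cuda")
#     decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
#     ).to("cuda")
#     image_emb, zero_emb = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
#     frog = decoder(image=init_image, image_embeds=image_emb, negative_image_embeds=zero_emb,
#                    strength=0.2, height=768, width=768).images[0]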
# ===== treap (randomized balanced binary search tree, standalone module) =====
from __future__ import annotations

from random import random


class Node:
    """Treap's node: a binary tree by value and a max-heap by priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (values <= value, values > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` is smaller than in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
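# Usage sketch (hypothetical walk-through of the treap API above):
def _demo_treap() -> None:
    root = None
    for value in (5, 3, 9, 3):
        root = insert(root, value)
    root = erase(root, 3)  # removes *all* nodes holding the value 3
    inorder(root)  # prints "5,9," (sorted order, trailing comma from end=",")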
# ===== `datasets` distributed split test script (standalone) =====
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data

from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
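# Usage sketch (assumption, not taken from this file): since the script reads
# RANK and WORLD_SIZE from the environment, it is normally driven by a
# distributed launcher such as torchrun; the filename below is hypothetical.
#
#     torchrun --nproc_per_node=2 distributed_test.py --streaming True --num_workers 2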
# ===== Gray code sequence generator (standalone module) =====
def gray_code(bit_count: int) -> list:
    """Return the n-bit Gray code sequence as a list of integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the n-bit Gray code sequence as a list of bit strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
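# Usage sketch: consecutive Gray-code entries differ in exactly one bit.
def _demo_gray_code() -> None:
    assert gray_code(2) == [0, 1, 3, 2]  # "00", "01", "11", "10"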
# ===== accelerate multi-process logging utility (standalone module) =====
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """Logger adapter that, by default, only logs on the main process."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})


# ===== index of first Fibonacci number with n digits (standalone module) =====
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
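# Usage sketch: F(12) = 144 is the first Fibonacci number with three digits.
def _demo_solution() -> None:
    assert solution(3) == 12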
# ===== VisionTextDualEncoderProcessor tests (transformers test module) =====
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to pass to the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)


# ===== TVLT processor (transformers module) =====
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into one processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
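# Usage sketch (not part of the module): a hedged example of driving the
# processor above; the checkpoint name, frame count, and waveform length are
# assumptions for illustration only.
#
#     import numpy as np
#     from transformers import TvltProcessor
#
#     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#     video = list(np.random.rand(8, 3, 224, 224))   # 8 RGB frames
#     audio = list(np.random.rand(10_000))           # mono waveform
#     inputs = processor(images=video, audio=audio, sampling_rate=44_100)
#     # `inputs` merges the image dict and the audio dict (see __call__ above)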
# ===== Chinese whole-word-masking reference script (standalone) =====
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


def _is_chinese_char(cp):
    """Check whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")

    args = parser.parse_args()
    main(args)
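# Usage sketch: the script is CLI-driven; the flags below come straight from
# the argparse block above, while the script filename itself is an assumption.
#
#     python prepare_chinese_ref.py \
#         --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp \
#         --bert ./resources/robert \
#         --save_path ./resources/ref.txt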
=\t\t\t\t\tnum_channels\r A_ :\t\t\t\tTuple\t\t =\t\t\t\t\tembeddings_size\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\thidden_sizes\r A_ :\t\t\t\tDict\t\t =\t\t\t\t\tdepths\r A_ :\t\t\t\tstr\t\t =\t\t\t\t\tis_training\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\tuse_labels\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\thidden_act\r A_ :\t\t\t\tOptional[Any]\t\t =\t\t\t\t\tnum_labels\r A_ :\t\t\t\tTuple\t\t =\t\t\t\t\tscope\r A_ :\t\t\t\tOptional[int]\t\t =\t\t\t\t\tlen(_SCREAMING_SNAKE_CASE\t\t)\r\r\r\r def _snake_case ( self\t\t)->Optional[Any]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ :\t\t\t\tList[str]\t\t =\t\t\t\t\tfloats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]\t\t)\r\r A_ :\t\t\t\tstr\t\t =\t\t\t\t\tNone\r if self.use_labels:\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\tids_tensor([self.batch_size]\t\t\t\t, self.num_labels\t\t)\r\r A_ :\t\t\t\tOptional[Any]\t\t =\t\t\t\t\tself.get_config()\r\r return config, pixel_values, labels\r\r\r\r def _snake_case ( self\t\t)->Union[str, Any]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r return RegNetConfig(\r num_channels=self.num_channels\t\t\t\t, embeddings_size=self.embeddings_size\t\t\t\t, hidden_sizes=self.hidden_sizes\t\t\t\t, depths=self.depths\t\t\t\t, hidden_act=self.hidden_act\t\t\t\t, num_labels=self.num_labels\t\t\t\t, )\r\r\r\r def _snake_case ( self\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t)->Union[str, Any]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ :\t\t\t\tDict\t\t =\t\t\t\t\tRegNetModel(config=_SCREAMING_SNAKE_CASE\t\t)\r model.to(_SCREAMING_SNAKE_CASE\t\t)\r model.eval()\r A_ :\t\t\t\tAny\t\t =\t\t\t\t\tmodel(_SCREAMING_SNAKE_CASE\t\t)\r # expected last hidden states: B, C, H // 32, W // 32\r self.parent.assertEqual(\r result.last_hidden_state.shape\t\t\t\t, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32)\t\t\t\t, )\r\r\r\r def _snake_case ( self\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t)->Union[str, Any]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\tself.num_labels\r A_ :\t\t\t\tDict\t\t =\t\t\t\t\tRegNetForImageClassification(_SCREAMING_SNAKE_CASE\t\t)\r model.to(_SCREAMING_SNAKE_CASE\t\t)\r model.eval()\r A_ :\t\t\t\tint\t\t =\t\t\t\t\tmodel(_SCREAMING_SNAKE_CASE\t\t\t\t, labels=_SCREAMING_SNAKE_CASE\t\t)\r self.parent.assertEqual(result.logits.shape\t\t\t\t, (self.batch_size, self.num_labels)\t\t)\r\r\r\r\r\r\r\r def _snake_case ( self\t\t)->Union[str, Any]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ :\t\t\t\tTuple\t\t =\t\t\t\t\tself.prepare_config_and_inputs()\r A_ , A_ , A_ :\t\t\t\tstr\t\t =\t\t\t\t\tconfig_and_inputs\r A_ :\t\t\t\tAny\t\t =\t\t\t\t\t{'''pixel_values''': pixel_values}\r return config, inputs_dict\r\r\r\r\r\r\r@require_torch\rclass _lowerCamelCase (\t\t\t\t\tUpperCamelCase\t\t\t\t\t,\t\t\t\t\t\t\tUpperCamelCase\t\t\t\t\t,\t\t\t\t\t\t\tunittest.TestCase\t\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r snake_case = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()\r snake_case = (\r {\"feature-extraction\": RegNetModel, \"image-classification\": RegNetForImageClassification}\r if is_torch_available()\r else {}\r )\r\r snake_case = False\r snake_case = False\r snake_case = False\r snake_case = False\r\r\r\r def _snake_case ( self\t\t)->Union[str, Any]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ :\t\t\t\tUnion[str, Any]\t\t 
=\t\t\t\t\tRegNetModelTester(self\t\t)\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\tConfigTester(self\t\t\t\t, config_class=_SCREAMING_SNAKE_CASE\t\t\t\t, has_text_modality=_SCREAMING_SNAKE_CASE\t\t)\r\r\r\r def _snake_case ( self\t\t)->Dict:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r self.create_and_test_config_common_properties()\r self.config_tester.create_and_test_config_to_json_string()\r self.config_tester.create_and_test_config_to_json_file()\r self.config_tester.create_and_test_config_from_and_save_pretrained()\r self.config_tester.create_and_test_config_with_num_labels()\r self.config_tester.check_config_can_be_init_without_params()\r self.config_tester.check_config_arguments_init()\r\r\r\r def _snake_case ( self\t\t)->Tuple:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r return\r\r\r\r @unittest.skip(reason='''RegNet does not use inputs_embeds'''\t\t)\r def _snake_case ( self\t\t)->Dict:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r pass\r\r\r\r @unittest.skip(reason='''RegNet does not support input and output embeddings'''\t\t)\r def _snake_case ( self\t\t)->str:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r pass\r\r\r\r def _snake_case ( self\t\t)->List[Any]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ , A_ :\t\t\t\tOptional[Any]\t\t =\t\t\t\t\tself.model_tester.prepare_config_and_inputs_for_common()\r\r for model_class in self.all_model_classes:\r A_ :\t\t\t\tstr\t\t =\t\t\t\t\tmodel_class(_SCREAMING_SNAKE_CASE\t\t)\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\tinspect.signature(model.forward\t\t)\r # signature.parameters is an OrderedDict => so arg_names order is deterministic\r A_ :\t\t\t\tAny\t\t =\t\t\t\t\t[*signature.parameters.keys()]\r\r A_ :\t\t\t\tAny\t\t =\t\t\t\t\t['''pixel_values''']\r self.assertListEqual(arg_names[:1]\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t)\r\r\r\r def _snake_case ( self\t\t)->Any:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ :\t\t\t\tDict\t\t =\t\t\t\t\tself.model_tester.prepare_config_and_inputs()\r self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE\t\t)\r\r\r\r def _snake_case ( self\t\t)->Optional[Any]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ , A_ :\t\t\t\tint\t\t =\t\t\t\t\tself.model_tester.prepare_config_and_inputs_for_common()\r\r for model_class in self.all_model_classes:\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\tmodel_class(config=_SCREAMING_SNAKE_CASE\t\t)\r for name, module in model.named_modules():\r if isinstance(_SCREAMING_SNAKE_CASE\t\t\t\t, (nn.BatchNormad, nn.GroupNorm)\t\t):\r self.assertTrue(\r torch.all(module.weight == 1\t\t)\t\t\t\t, msg=F'''Parameter {name} of model {model_class} seems not properly initialized'''\t\t\t\t, )\r self.assertTrue(\r torch.all(module.bias == 0\t\t)\t\t\t\t, msg=F'''Parameter {name} of model {model_class} seems not properly initialized'''\t\t\t\t, )\r\r\r\r def _snake_case ( self\t\t)->List[Any]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r def check_hidden_states_output(_SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t):\r A_ :\t\t\t\tstr\t\t =\t\t\t\t\tmodel_class(_SCREAMING_SNAKE_CASE\t\t)\r model.to(_SCREAMING_SNAKE_CASE\t\t)\r model.eval()\r\r with torch.no_grad():\r A_ :\t\t\t\tTuple\t\t =\t\t\t\t\tmodel(**self._prepare_for_class(_SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t)\t\t)\r\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\toutputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states\r\r A_ :\t\t\t\tOptional[int]\t\t =\t\t\t\t\tself.model_tester.num_stages\r 
self.assertEqual(len(_SCREAMING_SNAKE_CASE\t\t)\t\t\t\t, expected_num_stages + 1\t\t)\r\r # RegNet's feature maps are of shape (batch_size, num_channels, height, width)\r self.assertListEqual(\r list(hidden_states[0].shape[-2:]\t\t)\t\t\t\t, [self.model_tester.image_size // 2, self.model_tester.image_size // 2]\t\t\t\t, )\r\r A_ , A_ :\t\t\t\tTuple\t\t =\t\t\t\t\tself.model_tester.prepare_config_and_inputs_for_common()\r A_ :\t\t\t\tint\t\t =\t\t\t\t\t['''basic''', '''bottleneck''']\r for model_class in self.all_model_classes:\r for layer_type in layers_type:\r A_ :\t\t\t\tint\t\t =\t\t\t\t\tlayer_type\r A_ :\t\t\t\tList[Any]\t\t =\t\t\t\t\tTrue\r check_hidden_states_output(_SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t)\r\r # check that output_hidden_states also work using config\r del inputs_dict[\"output_hidden_states\"]\r A_ :\t\t\t\tstr\t\t =\t\t\t\t\tTrue\r\r check_hidden_states_output(_SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t)\r\r\r\r def _snake_case ( self\t\t)->Dict:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ :\t\t\t\tList[str]\t\t =\t\t\t\t\tself.model_tester.prepare_config_and_inputs()\r self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE\t\t)\r\r\r\r\r\r\r\r @slow\r def _snake_case ( self\t\t)->str:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\r A_ :\t\t\t\tDict\t\t =\t\t\t\t\tRegNetModel.from_pretrained(_SCREAMING_SNAKE_CASE\t\t)\r self.assertIsNotNone(_SCREAMING_SNAKE_CASE\t\t)\r\r\r\r\rdef _SCREAMING_SNAKE_CASE\t( ):\r A_ :\t\t\t\tint\t\t =\t\t\t\t\tImage.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )\r return image\r\r\r\r\r\r\r\r@require_torch\r@require_vision\rclass _lowerCamelCase (\t\t\t\t\tunittest.TestCase\t\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r @cached_property\r def _snake_case ( self\t\t)->List[str]:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r return (\r AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]\t\t)\r if is_vision_available()\r else None\r )\r\r\r\r\r\r\r\r @slow\r def _snake_case ( self\t\t)->Tuple:\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r A_ :\t\t\t\tList[Any]\t\t =\t\t\t\t\tRegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]\t\t).to(_SCREAMING_SNAKE_CASE\t\t)\r\r A_ :\t\t\t\tOptional[Any]\t\t =\t\t\t\t\tself.default_image_processor\r A_ :\t\t\t\tAny\t\t =\t\t\t\t\tprepare_img()\r A_ :\t\t\t\tOptional[Any]\t\t =\t\t\t\t\timage_processor(images=_SCREAMING_SNAKE_CASE\t\t\t\t, return_tensors='''pt'''\t\t).to(_SCREAMING_SNAKE_CASE\t\t)\r\r # forward pass\r with torch.no_grad():\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\tmodel(**_SCREAMING_SNAKE_CASE\t\t)\r\r # verify the logits\r A_ :\t\t\t\tUnion[str, Any]\t\t =\t\t\t\t\ttorch.Size((1, 1000)\t\t)\r self.assertEqual(outputs.logits.shape\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t)\r\r A_ :\t\t\t\tOptional[int]\t\t =\t\t\t\t\ttorch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6]\t\t).to(_SCREAMING_SNAKE_CASE\t\t)\r\r self.assertTrue(torch.allclose(outputs.logits[0, :3]\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t, atol=1e-4\t\t)\t\t)\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":186,"string":"186"},"style_context":{"kind":"string","value":"\r\r\rimport argparse\rimport pathlib\r\rimport fairseq\rimport torch\rfrom fairseq.models.roberta import RobertaModel as FairseqRobertaModel\rfrom fairseq.modules import 
# ---------------------------------------------------------------------------
# Next sample: script converting fairseq XLM-RoBERTa-XL checkpoints to the
# Transformers format.
# ---------------------------------------------------------------------------
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path, pytorch_dump_folder_path, classification_head
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # The assignment targets below were lost in this copy of the script; the
    # destination attributes follow the upstream transformers conversion script
    # and should be treated as a reconstruction.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate; the original text pointed both this block and the next at
        # `roberta_layer.fca`, which would wire the two projections to the same
        # tensors -- restored to fc1/fc2
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
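# A hypothetical invocation of the converter above (the script filename and all
# paths are placeholders):
#
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --classification_head
#
# Omit --classification_head to convert the masked-LM head instead; either way
# the script verifies that both models produce matching outputs before saving.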
# ---------------------------------------------------------------------------
# Next sample: a fill-mask demo for CamemBERT. The `<mask>` literals were
# stripped from this copy (they parse as HTML tags); they are restored here,
# as required by the `masked_input.count(...)` assertion and the use of
# `tokenizer.mask_token_id` below.
# ---------------------------------------------------------------------------
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"

print(fill_mask(masked_input, model, tokenizer, topk=3))
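# The call above prints a list of (filled_sentence, probability, token) triples,
# shaped like the sketch below (the values are illustrative, not asserted
# model output):
#
#   [("Le camembert est délicieux :)", 0.49, "délicieux"),
#    ("Le camembert est excellent :)", 0.10, "excellent"),
#    ("Le camembert est parfait :)", 0.03, "parfait")]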
# ---------------------------------------------------------------------------
# Next sample: a learning-rate scheduler module (diffusers-style). Function and
# enum names are recovered from the TYPE_TO_SCHEDULER_FUNCTION mapping, which
# survived intact at the end of the sample.
# ---------------------------------------------------------------------------
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
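# A minimal sketch of driving get_scheduler with a toy optimizer; the parameter
# and hyperparameters below are illustrative assumptions, not part of the module:
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(2))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        optimizer.step()
        scheduler.step()  # LR warms up linearly for 10 steps, then decays linearly toward 0
    print(optimizer.param_groups[0]["lr"])  # 0.0 at the end of training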
# ---------------------------------------------------------------------------
# Next sample: notebook progress-bar utilities and a Trainer callback
# (transformers-style). The HTML markup in the two helpers below was stripped
# from this copy; it is restored following the upstream transformers
# utils/notebook.py, so treat the exact tags as a reconstruction.
# ---------------------------------------------------------------------------
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))


class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()


class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
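# A minimal sketch of the tracker machinery by itself, outside Trainer (which
# normally wires this callback up automatically); run it in a Jupyter notebook
# so IPython.display can render the live-updating HTML:
if __name__ == "__main__":
    tracker = NotebookTrainingTracker(100, ["Step", "Training Loss"])
    for step in range(1, 101):
        tracker.update(step, comment=f"step {step}")
    tracker.write_line({"Step": 100, "Training Loss": 0.42})  # appends a table row below the bar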
# ---------------------------------------------------------------------------
# Next sample: sum of an arithmetic series.
# ---------------------------------------------------------------------------
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
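# Worked example of the closed form n/2 * (2a + (n - 1)d) used above:
# with a = 1, d = 1, n = 10 the sum is 10/2 * (2*1 + 9*1) = 5 * 11 = 55.0,
# which matches sum(range(1, 11)).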
# ---------------------------------------------------------------------------
# Next sample: script converting DPT-hybrid (MiDaS) checkpoints to Transformers.
# The attribute names assigned on `config` and the state-dict destination keys
# were lost in this copy; they are restored following the upstream conversion
# script and should be treated as a reconstruction.
# ---------------------------------------------------------------------------
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # the original condition was `if "nyu" or "midas" in checkpoint_url`, which
    # is always true; the membership test is applied to both substrings here
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    # str.replace is a no-op when the substring is absent, so the original
    # one-guard-per-rename chain is collapsed into plain ordered replacements;
    # only the renames whose guards carry extra conditions keep their ifs.
    name = name.replace("patch_embed", "")
    name = name.replace("pos_embed", "position_embeddings")
    name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    name = name.replace("blocks", "layer")
    name = name.replace("mlp.fc1", "intermediate.dense")
    name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    name = name.replace("scratch.output_conv", "head")
    name = name.replace("scratch", "neck")
    name = name.replace("layer1_rn", "convs.0")
    name = name.replace("layer2_rn", "convs.1")
    name = name.replace("layer3_rn", "convs.2")
    name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    name = name.replace("out_conv", "projection")
    name = name.replace("resConfUnit1", "residual_layer1")
    name = name.replace("resConfUnit2", "residual_layer2")
    name = name.replace("conv1", "convolution1")
    name = name.replace("conv2", "convolution2")
    # readout blocks
    name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    name = name.replace("pretrained", "dpt")
    name = name.replace("bn", "batch_norm")
    name = name.replace("head", "head.head")
    name = name.replace("encoder.norm", "layernorm")
    name = name.replace("auxlayer", "auxiliary_head.head")
    name = name.replace("backbone", "backbone.bit.encoder")
    name = name.replace("..", ".")
    name = name.replace("stem.conv", "bit.embedder.convolution")
    name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    name = name.replace("embedder.conv", "embedder.convolution")
    name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
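# Hypothetical invocation of the converter above (the script filename and the
# checkpoint path are placeholders):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url /path/to/dpt_hybrid-midas.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid-midas \
#       --show_prediction
#
# Despite its name, --checkpoint_url is passed straight to torch.load above
# (the torch.hub download line is commented out), so it must be a local file.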
# ---------------------------------------------------------------------------
# Next sample: a linear congruential pseudorandom number generator.
# ---------------------------------------------------------------------------
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
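# Worked example with a fixed seed (deterministic, unlike the time-based
# default above); note that 2 << 31 == 2**32:
#
#   lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=0)
#   lcg.next_number()  # (1664525 * 0 + 1013904223) % 2**32 == 1013904223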
# ---------------------------------------------------------------------------
# Next sample: tests for AutoFeatureExtractor (the original text's
# `WavaVecaConfig`/`WavaVecaFeatureExtractor` are digit-mangled forms of
# `Wav2Vec2Config`/`Wav2Vec2FeatureExtractor`, restored here).
# ---------------------------------------------------------------------------
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]


# ---------------------------------------------------------------------------
# Final sample: serialization tests for diffusers' PNDMScheduler.
# ---------------------------------------------------------------------------
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
torch.sum(torch.abs(output - new_output\t\t\t)\t\t\t) < 1e-5, \"Scheduler outputs are not identical\"\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tTuple\t\t\t):\n\t\t\t\t\t\t\t\t\tpass\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tDict ,lowerCamelCase__ :\tList[str]=0 ,**lowerCamelCase__ :\tTuple\t\t\t):\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tdict(self.forward_default_kwargs\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tkwargs.pop('num_inference_steps' ,lowerCamelCase__\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.dummy_sample\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\t0.1 * sample\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\t[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]\n\n\t\t\t\t\t\t\t\t\tfor scheduler_class in self.scheduler_classes:\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.get_scheduler_config()\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler_class(**lowerCamelCase__\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# copy over dummy past residuals (must be after setting timesteps)\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tdummy_past_residuals[:]\n\n\t\t\t\t\t\t\t\t\t\t\t\t\twith tempfile.TemporaryDirectory() as tmpdirname:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tscheduler.save_config(lowerCamelCase__\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler_class.from_pretrained(lowerCamelCase__\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# copy over dummy past residuals\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnew_scheduler.set_timesteps(lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# copy over dummy past residual (must be after setting timesteps)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tdummy_past_residuals[:]\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__\t\t\t).prev_sample\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tnew_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__\t\t\t).prev_sample\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tassert torch.sum(torch.abs(output - new_output\t\t\t)\t\t\t) < 1e-5, \"Scheduler outputs are not identical\"\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__\t\t\t).prev_sample\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tnew_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__\t\t\t).prev_sample\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tassert torch.sum(torch.abs(output - new_output\t\t\t)\t\t\t) < 1e-5, \"Scheduler outputs are not identical\"\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tList[Any] ,**lowerCamelCase__ :\tint\t\t\t):\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.scheduler_classes[0]\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.get_scheduler_config(**lowerCamelCase__\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler_class(**lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\t10\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.dummy_model()\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ 
=\t\t\t\t\t\t\tself.dummy_sample_deter\n\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\tfor i, t in enumerate(scheduler.prk_timesteps\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tmodel(lowerCamelCase__ ,lowerCamelCase__\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__\t\t\t).prev_sample\n\n\t\t\t\t\t\t\t\t\tfor i, t in enumerate(scheduler.plms_timesteps\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tmodel(lowerCamelCase__ ,lowerCamelCase__\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__\t\t\t).prev_sample\n\n\t\t\t\t\t\t\t\t\treturn sample\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tint\t\t\t):\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tdict(self.forward_default_kwargs\t\t\t)\n\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tkwargs.pop('num_inference_steps' ,lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\tfor scheduler_class in self.scheduler_classes:\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.get_scheduler_config()\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler_class(**lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.dummy_sample\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\t0.1 * sample\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tif num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps'\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(lowerCamelCase__\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\telif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps'\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tnum_inference_steps\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# copy over dummy past residuals (must be done after set_timesteps)\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\t[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tdummy_past_residuals[:]\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__\t\t\t).prev_sample\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__\t\t\t).prev_sample\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(output_a.shape ,sample.shape\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(output_a.shape ,output_a.shape\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__\t\t\t).prev_sample\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__\t\t\t).prev_sample\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(output_a.shape ,sample.shape\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(output_a.shape ,output_a.shape\t\t\t)\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tList[Any]\t\t\t):\n\t\t\t\t\t\t\t\t\tfor timesteps in [100, 1_000]:\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.check_over_configs(num_train_timesteps=lowerCamelCase__\t\t\t)\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tOptional[int]\t\t\t):\n\t\t\t\t\t\t\t\t\tfor steps_offset 
in [0, 1]:\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.check_over_configs(steps_offset=lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.scheduler_classes[0]\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.get_scheduler_config(steps_offset=1\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler_class(**lowerCamelCase__\t\t\t)\n\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(10\t\t\t)\n\t\t\t\t\t\t\t\t\tassert torch.equal(\n\t\t\t\t\t\t\t\t\t scheduler.timesteps ,torch.LongTensor(\n\t\t\t\t\t\t\t\t\t [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]\t\t\t) ,)\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tDict\t\t\t):\n\t\t\t\t\t\t\t\t\tfor beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2]\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__\t\t\t)\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tUnion[str, Any]\t\t\t):\n\t\t\t\t\t\t\t\t\tfor schedule in [\"linear\", \"squaredcos_cap_v2\"]:\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.check_over_configs(beta_schedule=lowerCamelCase__\t\t\t)\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tList[Any]\t\t\t):\n\t\t\t\t\t\t\t\t\tfor prediction_type in [\"epsilon\", \"v_prediction\"]:\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.check_over_configs(prediction_type=lowerCamelCase__\t\t\t)\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tOptional[Any]\t\t\t):\n\t\t\t\t\t\t\t\t\tfor t in [1, 5, 10]:\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.check_over_forward(time_step=lowerCamelCase__\t\t\t)\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tList[Any]\t\t\t):\n\t\t\t\t\t\t\t\t\tfor t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100]\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.check_over_forward(num_inference_steps=lowerCamelCase__\t\t\t)\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tint\t\t\t):\n\t\t\t\t\t\t\t\t\t# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\t27\n\n\t\t\t\t\t\t\t\t\tfor scheduler_class in self.scheduler_classes:\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.dummy_sample\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\t0.1 * sample\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.get_scheduler_config()\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler_class(**lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tscheduler.set_timesteps(lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t# before power of 3 fix, would error on first step, so we only need to do two\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor i, t in enumerate(scheduler.prk_timesteps[:2]\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tscheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__\t\t\t).prev_sample\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tint\t\t\t):\n\t\t\t\t\t\t\t\t\twith self.assertRaises(lowerCamelCase__\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.scheduler_classes[0]\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.get_scheduler_config()\n\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase__ 
=\t\t\t\t\t\t\tscheduler_class(**lowerCamelCase__\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tscheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample\t\t\t).prev_sample\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tTuple\t\t\t):\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.full_loop()\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\ttorch.sum(torch.abs(lowerCamelCase__\t\t\t)\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\ttorch.mean(torch.abs(lowerCamelCase__\t\t\t)\t\t\t)\n\n\t\t\t\t\t\t\t\t\tassert abs(result_sum.item() - 1_9_8.1_3_1_8\t\t\t) < 1e-2\n\t\t\t\t\t\t\t\t\tassert abs(result_mean.item() - 0.2_5_8_0\t\t\t) < 1e-3\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tTuple\t\t\t):\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.full_loop(prediction_type='v_prediction'\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\ttorch.sum(torch.abs(lowerCamelCase__\t\t\t)\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\ttorch.mean(torch.abs(lowerCamelCase__\t\t\t)\t\t\t)\n\n\t\t\t\t\t\t\t\t\tassert abs(result_sum.item() - 6_7.3_9_8_6\t\t\t) < 1e-2\n\t\t\t\t\t\t\t\t\tassert abs(result_mean.item() - 0.0_8_7_8\t\t\t) < 1e-3\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tUnion[str, Any]\t\t\t):\n\t\t\t\t\t\t\t\t\t# We specify different beta, so that the first alpha is 0.99\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\ttorch.sum(torch.abs(lowerCamelCase__\t\t\t)\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\ttorch.mean(torch.abs(lowerCamelCase__\t\t\t)\t\t\t)\n\n\t\t\t\t\t\t\t\t\tassert abs(result_sum.item() - 2_3_0.0_3_9_9\t\t\t) < 1e-2\n\t\t\t\t\t\t\t\t\tassert abs(result_mean.item() - 0.2_9_9_5\t\t\t) < 1e-3\n\n\n\n\n\n\n\n\t\t\t\t\tdef \t\t__lowerCAmelCase\t\t\t\t\t\t(\t\t\t\tself :\tTuple\t\t\t):\n\t\t\t\t\t\t\t\t\t# We specify different beta, so that the first alpha is 0.99\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\tself.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\ttorch.sum(torch.abs(lowerCamelCase__\t\t\t)\t\t\t)\n\t\t\t\t\t\t\t\t\tUpperCAmelCase__ =\t\t\t\t\t\t\ttorch.mean(torch.abs(lowerCamelCase__\t\t\t)\t\t\t)\n\n\t\t\t\t\t\t\t\t\tassert abs(result_sum.item() - 1_8_6.9_4_8_2\t\t\t) < 1e-2\n\t\t\t\t\t\t\t\t\tassert abs(result_mean.item() - 0.2_4_3_4\t\t\t) < 1e-3\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":98,"string":"98"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":873,"cells":{"code":{"kind":"string","value":"\r\r\r\"\"\"simple docstring\"\"\"\r\r\r\r\r\rimport warnings\rfrom typing import List, Optional, Tuple, Union\r\rimport numpy as np\rimport PIL\rimport torch\r\rfrom ...models import UNetaDModel\rfrom ...schedulers import RePaintScheduler\rfrom ...utils import PIL_INTERPOLATION, logging, randn_tensor\rfrom ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput\r\r\rsnake_case__\t\t\t\t\t\t: List[str] \t\t\t=\t\t\tlogging.get_logger(__name__) # pylint: disable=invalid-name\r\r\rdef _snake_case ( _snake_case\t\t\t: Union[List, PIL.Image.Image, torch.Tensor] ):\r warnings.warn(\r '''The preprocess method is deprecated and will be removed in a future version. 
Please'''\r ''' use VaeImageProcessor.preprocess instead'''\t,\t\t\t\t_snake_case\t,\t\t\t\t)\r if isinstance(_snake_case\t,\t\t\t\ttorch.Tensor ):\r return image\r elif isinstance(_snake_case\t,\t\t\t\tPIL.Image.Image ):\r lowerCAmelCase :\t\tOptional[int] =\t\t\t\t[image]\r\r if isinstance(image[0]\t,\t\t\t\tPIL.Image.Image ):\r lowerCAmelCase, lowerCAmelCase :\t\tint =\t\t\t\timage[0].size\r lowerCAmelCase, lowerCAmelCase :\t\tOptional[int] =\t\t\t\t(x - x % 8 for x in (w, h)) # resize to integer multiple of 8\r\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\t[np.array(i.resize((w, h)\t,\t\t\t\tresample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]\r lowerCAmelCase :\t\tint =\t\t\t\tnp.concatenate(_snake_case\t,\t\t\t\taxis=0 )\r lowerCAmelCase :\t\tOptional[Any] =\t\t\t\tnp.array(_snake_case ).astype(np.floataa ) / 255.0\r lowerCAmelCase :\t\tList[Any] =\t\t\t\timage.transpose(0\t,\t\t\t\t3\t,\t\t\t\t1\t,\t\t\t\t2 )\r lowerCAmelCase :\t\tList[str] =\t\t\t\t2.0 * image - 1.0\r lowerCAmelCase :\t\tList[Any] =\t\t\t\ttorch.from_numpy(_snake_case )\r elif isinstance(image[0]\t,\t\t\t\ttorch.Tensor ):\r lowerCAmelCase :\t\tAny =\t\t\t\ttorch.cat(_snake_case\t,\t\t\t\tdim=0 )\r return image\r\r\rdef _snake_case ( _snake_case\t\t\t: Union[List, PIL.Image.Image, torch.Tensor] ):\r if isinstance(_snake_case\t,\t\t\t\ttorch.Tensor ):\r return mask\r elif isinstance(_snake_case\t,\t\t\t\tPIL.Image.Image ):\r lowerCAmelCase :\t\tstr =\t\t\t\t[mask]\r\r if isinstance(mask[0]\t,\t\t\t\tPIL.Image.Image ):\r lowerCAmelCase, lowerCAmelCase :\t\tint =\t\t\t\tmask[0].size\r lowerCAmelCase, lowerCAmelCase :\t\tDict =\t\t\t\t(x - x % 32 for x in (w, h)) # resize to integer multiple of 32\r lowerCAmelCase :\t\tList[str] =\t\t\t\t[np.array(m.convert('''L''' ).resize((w, h)\t,\t\t\t\tresample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]\r lowerCAmelCase :\t\tOptional[int] =\t\t\t\tnp.concatenate(_snake_case\t,\t\t\t\taxis=0 )\r lowerCAmelCase :\t\tDict =\t\t\t\tmask.astype(np.floataa ) / 255.0\r lowerCAmelCase :\t\tList[str] =\t\t\t\t0\r lowerCAmelCase :\t\tOptional[int] =\t\t\t\t1\r lowerCAmelCase :\t\tList[Any] =\t\t\t\ttorch.from_numpy(_snake_case )\r elif isinstance(mask[0]\t,\t\t\t\ttorch.Tensor ):\r lowerCAmelCase :\t\tOptional[int] =\t\t\t\ttorch.cat(_snake_case\t,\t\t\t\tdim=0 )\r return mask\r\r\r\r\r\r\rclass \t\t\t\t\t\tsnake_case_( a__ ):\r __UpperCamelCase\t\t\t\t\t= 42\r __UpperCamelCase\t\t\t\t\t= 42\r\r\r\r\r\r\r def __init__(\t\tself\t\t\t\t\t: List[Any] ,\t\t\tUpperCamelCase_\t\t\t\t\t: List[str] ,\t\t\tUpperCamelCase_\t\t\t\t\t: Optional[Any]\t\t\t):\r super().__init__()\r self.register_modules(unet=UpperCamelCase_ ,\t\t\tscheduler=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r @torch.no_grad()\r def __call__(\t\tself\t\t\t\t\t: Union[str, Any] ,\t\t\tUpperCamelCase_\t\t\t\t\t: Union[torch.Tensor, PIL.Image.Image] ,\t\t\tUpperCamelCase_\t\t\t\t\t: Union[torch.Tensor, PIL.Image.Image] ,\t\t\tUpperCamelCase_\t\t\t\t\t: int = 2_5_0 ,\t\t\tUpperCamelCase_\t\t\t\t\t: float = 0.0 ,\t\t\tUpperCamelCase_\t\t\t\t\t: int = 1_0 ,\t\t\tUpperCamelCase_\t\t\t\t\t: int = 1_0 ,\t\t\tUpperCamelCase_\t\t\t\t\t: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,\t\t\tUpperCamelCase_\t\t\t\t\t: Optional[str] = \"pil\" ,\t\t\tUpperCamelCase_\t\t\t\t\t: bool = True ,\t\t\t):\r lowerCAmelCase :\t\tOptional[Any] =\t\t\t\timage\r\r lowerCAmelCase :\t\tTuple =\t\t\t\t_preprocess_image(UpperCamelCase_\t\t\t)\r lowerCAmelCase :\t\tint =\t\t\t\toriginal_image.to(device=self.device 
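# keep image, mask and noise on the UNet's device/dtype so the denoising loop below runs without implicit casts\r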
,\t\t\tdtype=self.unet.dtype\t\t\t)\r lowerCAmelCase :\t\tOptional[Any] =\t\t\t\t_preprocess_mask(UpperCamelCase_\t\t\t)\r lowerCAmelCase :\t\tstr =\t\t\t\tmask_image.to(device=self.device ,\t\t\tdtype=self.unet.dtype\t\t\t)\r\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\toriginal_image.shape[0]\r\r # sample gaussian noise to begin the loop\r if isinstance(UpperCamelCase_ ,\t\t\tUpperCamelCase_\t\t\t) and len(UpperCamelCase_\t\t\t) != batch_size:\r raise ValueError(\r F'''You have passed a list of generators of length {len(UpperCamelCase_\t\t\t)}, but requested an effective batch'''\r F''' size of {batch_size}. Make sure the batch size matches the length of the generators.'''\t\t\t)\r\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\toriginal_image.shape\r lowerCAmelCase :\t\tstr =\t\t\t\trandn_tensor(UpperCamelCase_ ,\t\t\tgenerator=UpperCamelCase_ ,\t\t\tdevice=self.device ,\t\t\tdtype=self.unet.dtype\t\t\t)\r\r # set step values\r self.scheduler.set_timesteps(UpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tself.device\t\t\t)\r lowerCAmelCase :\t\tOptional[int] =\t\t\t\teta\r\r lowerCAmelCase :\t\tList[str] =\t\t\t\tself.scheduler.timesteps[0] + 1\r lowerCAmelCase :\t\tList[str] =\t\t\t\tgenerator[0] if isinstance(UpperCamelCase_ ,\t\t\tUpperCamelCase_\t\t\t) else generator\r for i, t in enumerate(self.progress_bar(self.scheduler.timesteps\t\t\t)\t\t\t):\r if t < t_last:\r # predict the noise residual\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\tself.unet(UpperCamelCase_ ,\t\t\tUpperCamelCase_\t\t\t).sample\r # compute previous image: x_t -> x_t-1\r lowerCAmelCase :\t\tstr =\t\t\t\tself.scheduler.step(UpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tUpperCamelCase_\t\t\t).prev_sample\r\r else:\r # compute the reverse: x_t-1 -> x_t\r lowerCAmelCase :\t\tOptional[Any] =\t\t\t\tself.scheduler.undo_step(UpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tUpperCamelCase_\t\t\t)\r lowerCAmelCase :\t\tList[Any] =\t\t\t\tt\r\r lowerCAmelCase :\t\tint =\t\t\t\t(image / 2 + 0.5).clamp(0 ,\t\t\t1\t\t\t)\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\timage.cpu().permute(0 ,\t\t\t2 ,\t\t\t3 ,\t\t\t1\t\t\t).numpy()\r if output_type == \"pil\":\r lowerCAmelCase :\t\tTuple =\t\t\t\tself.numpy_to_pil(UpperCamelCase_\t\t\t)\r\r if not return_dict:\r return (image,)\r\r return ImagePipelineOutput(images=UpperCamelCase_\t\t\t)\r\r\r"},"code_codestyle":{"kind":"number","value":314,"string":"314"},"style_context":{"kind":"string","value":"\r\r\r\"\"\"simple docstring\"\"\"\r\r\r\r\r\rimport torch\r\rfrom diffusers import DDPMScheduler\r\rfrom .test_schedulers import SchedulerCommonTest\r\r\r\rclass \t\t\t\t\t\tsnake_case_( a__ ):\r __UpperCamelCase\t\t\t\t\t= (DDPMScheduler,)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: List[Any] ,\t\t\t**UpperCamelCase_\t\t\t\t\t: Union[str, Any]\t\t\t):\r lowerCAmelCase :\t\tOptional[Any] =\t\t\t\t{\r '''num_train_timesteps''': 1_0_0_0,\r '''beta_start''': 0.0_001,\r '''beta_end''': 0.02,\r '''beta_schedule''': '''linear''',\r '''variance_type''': '''fixed_small''',\r '''clip_sample''': True,\r }\r\r config.update(**UpperCamelCase_\t\t\t)\r return config\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Optional[int]\t\t\t):\r for timesteps in [1, 5, 1_0_0, 1_0_0_0]:\r self.check_over_configs(num_train_timesteps=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Tuple\t\t\t):\r for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] 
,\t\t\t[0.002, 0.02, 0.2, 2]\t\t\t):\r self.check_over_configs(beta_start=UpperCamelCase_ ,\t\t\tbeta_end=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: str\t\t\t):\r for schedule in [\"linear\", \"squaredcos_cap_v2\"]:\r self.check_over_configs(beta_schedule=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Optional[Any]\t\t\t):\r for variance in [\"fixed_small\", \"fixed_large\", \"other\"]:\r self.check_over_configs(variance_type=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Optional[int]\t\t\t):\r for clip_sample in [True, False]:\r self.check_over_configs(clip_sample=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Any\t\t\t):\r self.check_over_configs(thresholding=UpperCamelCase_\t\t\t)\r for threshold in [0.5, 1.0, 2.0]:\r for prediction_type in [\"epsilon\", \"sample\", \"v_prediction\"]:\r self.check_over_configs(\r thresholding=UpperCamelCase_ ,\t\t\tprediction_type=UpperCamelCase_ ,\t\t\tsample_max_value=UpperCamelCase_ ,\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Tuple\t\t\t):\r for prediction_type in [\"epsilon\", \"sample\", \"v_prediction\"]:\r self.check_over_configs(prediction_type=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: str\t\t\t):\r for t in [0, 5_0_0, 9_9_9]:\r self.check_over_forward(time_step=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: int\t\t\t):\r lowerCAmelCase :\t\tstr =\t\t\t\tself.scheduler_classes[0]\r lowerCAmelCase :\t\tDict =\t\t\t\tself.get_scheduler_config()\r lowerCAmelCase :\t\tDict =\t\t\t\tscheduler_class(**UpperCamelCase_\t\t\t)\r\r assert torch.sum(torch.abs(scheduler._get_variance(0\t\t\t) - 0.0\t\t\t)\t\t\t) < 1E-5\r assert torch.sum(torch.abs(scheduler._get_variance(4_8_7\t\t\t) - 0.00_979\t\t\t)\t\t\t) < 1E-5\r assert torch.sum(torch.abs(scheduler._get_variance(9_9_9\t\t\t) - 0.02\t\t\t)\t\t\t) < 1E-5\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Tuple\t\t\t):\r lowerCAmelCase :\t\tList[Any] =\t\t\t\tself.scheduler_classes[0]\r lowerCAmelCase :\t\tList[Any] =\t\t\t\tself.get_scheduler_config()\r lowerCAmelCase :\t\tList[str] =\t\t\t\tscheduler_class(**UpperCamelCase_\t\t\t)\r\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\tlen(UpperCamelCase_\t\t\t)\r\r lowerCAmelCase :\t\tList[str] =\t\t\t\tself.dummy_model()\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\tself.dummy_sample_deter\r lowerCAmelCase :\t\tList[Any] =\t\t\t\ttorch.manual_seed(0\t\t\t)\r\r for t in reversed(range(UpperCamelCase_\t\t\t)\t\t\t):\r # 1. predict noise residual\r lowerCAmelCase :\t\tOptional[int] =\t\t\t\tmodel(UpperCamelCase_ ,\t\t\tUpperCamelCase_\t\t\t)\r\r # 2. 
predict previous mean of sample x_t-1\r lowerCAmelCase :\t\tOptional[Any] =\t\t\t\tscheduler.step(UpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tgenerator=UpperCamelCase_\t\t\t).prev_sample\r\r # if t > 0:\r # noise = self.dummy_sample_deter\r # variance = scheduler.get_variance(t) ** (0.5) * noise\r #\r # sample = pred_prev_sample + variance\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\tpred_prev_sample\r\r lowerCAmelCase :\t\tstr =\t\t\t\ttorch.sum(torch.abs(UpperCamelCase_\t\t\t)\t\t\t)\r lowerCAmelCase :\t\tint =\t\t\t\ttorch.mean(torch.abs(UpperCamelCase_\t\t\t)\t\t\t)\r\r assert abs(result_sum.item() - 258.9_606\t\t\t) < 1E-2\r assert abs(result_mean.item() - 0.3_372\t\t\t) < 1E-3\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Any\t\t\t):\r lowerCAmelCase :\t\tOptional[int] =\t\t\t\tself.scheduler_classes[0]\r lowerCAmelCase :\t\tAny =\t\t\t\tself.get_scheduler_config(prediction_type='''v_prediction'''\t\t\t)\r lowerCAmelCase :\t\tTuple =\t\t\t\tscheduler_class(**UpperCamelCase_\t\t\t)\r\r lowerCAmelCase :\t\tDict =\t\t\t\tlen(UpperCamelCase_\t\t\t)\r\r lowerCAmelCase :\t\tAny =\t\t\t\tself.dummy_model()\r lowerCAmelCase :\t\tAny =\t\t\t\tself.dummy_sample_deter\r lowerCAmelCase :\t\tList[Any] =\t\t\t\ttorch.manual_seed(0\t\t\t)\r\r for t in reversed(range(UpperCamelCase_\t\t\t)\t\t\t):\r # 1. predict noise residual\r lowerCAmelCase :\t\tstr =\t\t\t\tmodel(UpperCamelCase_ ,\t\t\tUpperCamelCase_\t\t\t)\r\r # 2. predict previous mean of sample x_t-1\r lowerCAmelCase :\t\tList[Any] =\t\t\t\tscheduler.step(UpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tUpperCamelCase_ ,\t\t\tgenerator=UpperCamelCase_\t\t\t).prev_sample\r\r # if t > 0:\r # noise = self.dummy_sample_deter\r # variance = scheduler.get_variance(t) ** (0.5) * noise\r #\r # sample = pred_prev_sample + variance\r lowerCAmelCase :\t\tList[Any] =\t\t\t\tpred_prev_sample\r\r lowerCAmelCase :\t\tList[str] =\t\t\t\ttorch.sum(torch.abs(UpperCamelCase_\t\t\t)\t\t\t)\r lowerCAmelCase :\t\tDict =\t\t\t\ttorch.mean(torch.abs(UpperCamelCase_\t\t\t)\t\t\t)\r\r assert abs(result_sum.item() - 202.0_296\t\t\t) < 1E-2\r assert abs(result_mean.item() - 0.2_631\t\t\t) < 1E-3\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Any\t\t\t):\r lowerCAmelCase :\t\tDict =\t\t\t\tself.scheduler_classes[0]\r lowerCAmelCase :\t\tTuple =\t\t\t\tself.get_scheduler_config()\r lowerCAmelCase :\t\tint =\t\t\t\tscheduler_class(**UpperCamelCase_\t\t\t)\r\r lowerCAmelCase :\t\tList[Any] =\t\t\t\t[1_0_0, 8_7, 5_0, 1, 0]\r\r scheduler.set_timesteps(timesteps=UpperCamelCase_\t\t\t)\r\r lowerCAmelCase :\t\tDict =\t\t\t\tscheduler.timesteps\r\r for i, timestep in enumerate(UpperCamelCase_\t\t\t):\r if i == len(UpperCamelCase_\t\t\t) - 1:\r lowerCAmelCase :\t\tList[Any] =\t\t\t\t-1\r else:\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\ttimesteps[i + 1]\r\r lowerCAmelCase :\t\tAny =\t\t\t\tscheduler.previous_timestep(UpperCamelCase_\t\t\t)\r lowerCAmelCase :\t\tDict =\t\t\t\tprev_t.item()\r\r self.assertEqual(UpperCamelCase_ ,\t\t\tUpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Dict\t\t\t):\r lowerCAmelCase :\t\tUnion[str, Any] =\t\t\t\tself.scheduler_classes[0]\r lowerCAmelCase :\t\tList[Any] =\t\t\t\tself.get_scheduler_config()\r lowerCAmelCase :\t\tTuple =\t\t\t\tscheduler_class(**UpperCamelCase_\t\t\t)\r\r lowerCAmelCase :\t\tint =\t\t\t\t[1_0_0, 8_7, 5_0, 5_1, 0]\r\r with self.assertRaises(UpperCamelCase_ ,\t\t\tmsg='''`custom_timesteps` must be in descending order.'''\t\t\t):\r 
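# [100, 87, 50, 51, 0] is not strictly descending (50 -> 51), so set_timesteps is expected to raise here\r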
scheduler.set_timesteps(timesteps=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Tuple\t\t\t):\r lowerCAmelCase :\t\tAny =\t\t\t\tself.scheduler_classes[0]\r lowerCAmelCase :\t\tOptional[int] =\t\t\t\tself.get_scheduler_config()\r lowerCAmelCase :\t\tstr =\t\t\t\tscheduler_class(**UpperCamelCase_\t\t\t)\r\r lowerCAmelCase :\t\tList[str] =\t\t\t\t[1_0_0, 8_7, 5_0, 1, 0]\r lowerCAmelCase :\t\tint =\t\t\t\tlen(UpperCamelCase_\t\t\t)\r\r with self.assertRaises(UpperCamelCase_ ,\t\t\tmsg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''\t\t\t):\r scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ ,\t\t\ttimesteps=UpperCamelCase_\t\t\t)\r\r\r\r\r\r\r def lowerCamelCase__ (\t\tself\t\t\t\t\t: Optional[Any]\t\t\t):\r lowerCAmelCase :\t\tList[Any] =\t\t\t\tself.scheduler_classes[0]\r lowerCAmelCase :\t\tTuple =\t\t\t\tself.get_scheduler_config()\r lowerCAmelCase :\t\tDict =\t\t\t\tscheduler_class(**UpperCamelCase_\t\t\t)\r\r lowerCAmelCase :\t\tOptional[Any] =\t\t\t\t[scheduler.config.num_train_timesteps]\r\r with self.assertRaises(\r UpperCamelCase_ ,\t\t\tmsg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' ,\t\t\t):\r scheduler.set_timesteps(timesteps=UpperCamelCase_\t\t\t)\r\r\r"},"style_context_codestyle":{"kind":"number","value":314,"string":"314"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":874,"cells":{"code":{"kind":"string","value":"\rfrom collections import OrderedDict\rfrom typing import TYPE_CHECKING, Any, Mapping, Optional, Union\r\rfrom ...configuration_utils import PretrainedConfig\rfrom ...onnx import OnnxConfig\rfrom ...utils import logging\r\r\rif TYPE_CHECKING:\r from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType\r\r\rlowercase_ \t\t\t\t\t\t\t= logging.get_logger(__name__)\r\rlowercase_ \t\t\t\t\t\t\t= {\r \"\"\"microsoft/deberta-v2-xlarge\"\"\": \"\"\"https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json\"\"\",\r \"\"\"microsoft/deberta-v2-xxlarge\"\"\": \"\"\"https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json\"\"\",\r \"\"\"microsoft/deberta-v2-xlarge-mnli\"\"\": (\r \"\"\"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json\"\"\"\r ),\r \"\"\"microsoft/deberta-v2-xxlarge-mnli\"\"\": (\r \"\"\"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json\"\"\"\r ),\r}\r\r\r\r\r\rclass __UpperCamelCase (\t\t\t\tlowerCAmelCase__ ):\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\r lowerCAmelCase_\t\t\t\t\t\t\t\t\t= '''deberta-v2'''\r\r\r\r def __init__(\t\t\t\t\t\t\tself\t\t\t\t:\t\t\t\tAny\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tTuple=12_8100\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tint=1536\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tint=24\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tList[Any]=24\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tAny=6144\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tList[Any]=\"gelu\"\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tList[str]=0.1\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tAny=0.1\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tTuple=512\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tOptional[int]=0\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tList[Any]=0.02\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tOptional[int]=1e-7\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tTuple=False\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tOptional[Any]=-1\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tUnion[str, Any]=0\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tUnion[str, 
Any]=True\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tList[Any]=None\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tOptional[int]=0\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tOptional[int]=\"gelu\"\t\t\t,\t\t**_A\t\t\t\t:\t\t\t\tList[str]\t\t\t,\t\t):\r\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r super().__init__(**_A )\r\r __SCREAMING_SNAKE_CASE\t: List[Any] =\t\t\t\t\t\thidden_size\r __SCREAMING_SNAKE_CASE\t: Tuple =\t\t\t\t\t\tnum_hidden_layers\r __SCREAMING_SNAKE_CASE\t: List[str] =\t\t\t\t\t\tnum_attention_heads\r __SCREAMING_SNAKE_CASE\t: List[Any] =\t\t\t\t\t\tintermediate_size\r __SCREAMING_SNAKE_CASE\t: Optional[Any] =\t\t\t\t\t\thidden_act\r __SCREAMING_SNAKE_CASE\t: Union[str, Any] =\t\t\t\t\t\thidden_dropout_prob\r __SCREAMING_SNAKE_CASE\t: Optional[Any] =\t\t\t\t\t\tattention_probs_dropout_prob\r __SCREAMING_SNAKE_CASE\t: Union[str, Any] =\t\t\t\t\t\tmax_position_embeddings\r __SCREAMING_SNAKE_CASE\t: Any =\t\t\t\t\t\ttype_vocab_size\r __SCREAMING_SNAKE_CASE\t: Any =\t\t\t\t\t\tinitializer_range\r __SCREAMING_SNAKE_CASE\t: str =\t\t\t\t\t\trelative_attention\r __SCREAMING_SNAKE_CASE\t: Tuple =\t\t\t\t\t\tmax_relative_positions\r __SCREAMING_SNAKE_CASE\t: List[str] =\t\t\t\t\t\tpad_token_id\r __SCREAMING_SNAKE_CASE\t: Optional[Any] =\t\t\t\t\t\tposition_biased_input\r\r # Backwards compatibility\r if type(_A ) == str:\r __SCREAMING_SNAKE_CASE\t: List[str] =\t\t\t\t\t\t[x.strip() for x in pos_att_type.lower().split('''|''' )]\r\r __SCREAMING_SNAKE_CASE\t: Optional[Any] =\t\t\t\t\t\tpos_att_type\r __SCREAMING_SNAKE_CASE\t: List[str] =\t\t\t\t\t\tvocab_size\r __SCREAMING_SNAKE_CASE\t: Union[str, Any] =\t\t\t\t\t\tlayer_norm_eps\r\r __SCREAMING_SNAKE_CASE\t: Any =\t\t\t\t\t\tkwargs.get('''pooler_hidden_size'''\t\t\t,\t\t_A )\r __SCREAMING_SNAKE_CASE\t: Tuple =\t\t\t\t\t\tpooler_dropout\r __SCREAMING_SNAKE_CASE\t: int =\t\t\t\t\t\tpooler_hidden_act\r\r\r\r\r\r\rclass __UpperCamelCase (\t\t\t\tlowerCAmelCase__ ):\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\r @property\r def UpperCAmelCase__\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t:\t\t\t\tOptional[int] ):\r\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r if self.task == \"multiple-choice\":\r __SCREAMING_SNAKE_CASE\t: int =\t\t\t\t\t\t{0: '''batch''', 1: '''choice''', 2: '''sequence'''}\r else:\r __SCREAMING_SNAKE_CASE\t: Optional[int] =\t\t\t\t\t\t{0: '''batch''', 1: '''sequence'''}\r if self._config.type_vocab_size > 0:\r return OrderedDict(\r [('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )\r else:\r return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )\r\r\r\r @property\r def UpperCAmelCase__\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t:\t\t\t\tDict ):\r\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r return 12\r\r\r\r\r def UpperCAmelCase__\t\t\t\t(\t\t\t\t\t\t\tself\t\t\t\t:\t\t\t\tList[Any]\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tUnion[\"PreTrainedTokenizerBase\", \"FeatureExtractionMixin\"]\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tint = -1\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tint = -1\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tint = -1\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tbool = False\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tOptional[\"TensorType\"] = None\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tint = 3\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tint = 40\t\t\t,\t\t_A\t\t\t\t:\t\t\t\tint = 40\t\t\t,\t\t_A\t\t\t\t:\t\t\t\t\"PreTrainedTokenizerBase\" = None\t\t\t,\t\t):\r\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r __SCREAMING_SNAKE_CASE\t: List[str] =\t\t\t\t\t\tsuper().generate_dummy_inputs(preprocessor=_A\t\t\t,\t\tframework=_A )\r if 
self._config.type_vocab_size == 0 and \"token_type_ids\" in dummy_inputs:\r del dummy_inputs[\"token_type_ids\"]\r return dummy_inputs\r"},"code_codestyle":{"kind":"number","value":303,"string":"303"},"style_context":{"kind":"string","value":"\rimport argparse\rimport json\rimport os\rimport pickle\rimport shutil\r\rimport numpy as np\rimport torch\rfrom distiller import Distiller\rfrom lm_seqs_dataset import LmSeqsDataset\r\rfrom transformers import (\r BertConfig,\r BertForMaskedLM,\r BertTokenizer,\r DistilBertConfig,\r DistilBertForMaskedLM,\r DistilBertTokenizer,\r GPTaConfig,\r GPTaLMHeadModel,\r GPTaTokenizer,\r RobertaConfig,\r RobertaForMaskedLM,\r RobertaTokenizer,\r)\rfrom utils import git_log, init_gpu_params, logger, set_seed\r\r\rlowercase_ \t\t\t\t\t\t\t= {\r \"\"\"distilbert\"\"\": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),\r \"\"\"roberta\"\"\": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),\r \"\"\"bert\"\"\": (BertConfig, BertForMaskedLM, BertTokenizer),\r \"\"\"gpt2\"\"\": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),\r}\r\r\rdef \t\t\t\ta__ (\t\t\t\tsnake_case\t\t\t\t):\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)\r assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)\r if args.mlm:\r assert os.path.isfile(args.token_counts\t\t\t\t)\r assert (args.student_type in [\"roberta\", \"distilbert\"]) and (args.teacher_type in [\"roberta\", \"bert\"])\r else:\r assert (args.student_type in [\"gpt2\"]) and (args.teacher_type in [\"gpt2\"])\r\r assert args.teacher_type == args.student_type or (\r args.student_type == \"distilbert\" and args.teacher_type == \"bert\"\r )\r assert os.path.isfile(args.student_config\t\t\t\t)\r if args.student_pretrained_weights is not None:\r assert os.path.isfile(args.student_pretrained_weights\t\t\t\t)\r\r if args.freeze_token_type_embds:\r assert args.student_type in [\"roberta\"]\r\r assert args.alpha_ce >= 0.0\r assert args.alpha_mlm >= 0.0\r assert args.alpha_clm >= 0.0\r assert args.alpha_mse >= 0.0\r assert args.alpha_cos >= 0.0\r assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0\r\r\rdef \t\t\t\ta__ (\t\t\t\tsnake_case , snake_case\t\t\t\t):\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r if args.student_type == \"roberta\":\r __SCREAMING_SNAKE_CASE\t: int =\t\t\t\t\t\tFalse\r elif args.student_type == \"gpt2\":\r __SCREAMING_SNAKE_CASE\t: Optional[int] =\t\t\t\t\t\tFalse\r\r\rdef \t\t\t\ta__ (\t\t\t\tsnake_case , snake_case\t\t\t\t):\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r if args.student_type == \"roberta\":\r __SCREAMING_SNAKE_CASE\t: Dict =\t\t\t\t\t\tFalse\r\r\rdef \t\t\t\ta__ (\t\t\t\t):\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r __SCREAMING_SNAKE_CASE\t: Dict =\t\t\t\t\t\targparse.ArgumentParser(description='''Training'''\t\t\t\t)\r parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.'''\t\t\t\t)\r\r parser.add_argument(\r '''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)'''\t\t\t\t)\r parser.add_argument(\r '''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )\r\r parser.add_argument(\r '''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case 
, help='''The student type (DistilBERT, RoBERTa).''' , )\r parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.'''\t\t\t\t)\r parser.add_argument(\r '''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.'''\t\t\t\t)\r\r parser.add_argument(\r '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa).'''\t\t\t\t)\r parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.'''\t\t\t\t)\r\r parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , help='''Temperature for the softmax temperature.'''\t\t\t\t)\r parser.add_argument(\r '''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.'''\t\t\t\t)\r parser.add_argument(\r '''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )\r parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.'''\t\t\t\t)\r parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.'''\t\t\t\t)\r parser.add_argument(\r '''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.'''\t\t\t\t)\r\r parser.add_argument(\r '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.'''\t\t\t\t)\r parser.add_argument(\r '''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , )\r parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.'''\t\t\t\t)\r parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.'''\t\t\t\t)\r parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.'''\t\t\t\t)\r parser.add_argument(\r '''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )\r parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.'''\t\t\t\t)\r\r parser.add_argument(\r '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )\r parser.add_argument(\r '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\\'roberta\\', \\'gpt2\\'] only.''' , )\r parser.add_argument(\r '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\\'roberta\\'] only.''' , )\r\r parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.'''\t\t\t\t)\r parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).'''\t\t\t\t)\r parser.add_argument(\r '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. 
Default is true.''' , )\r\r parser.add_argument(\r '''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , )\r parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.'''\t\t\t\t)\r parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.'''\t\t\t\t)\r parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.'''\t\t\t\t)\r parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.'''\t\t\t\t)\r parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.'''\t\t\t\t)\r parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.'''\t\t\t\t)\r\r parser.add_argument(\r '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )\r parser.add_argument(\r '''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=(\r '''For fp16: Apex AMP optimization level selected in [\\'O0\\', \\'O1\\', \\'O2\\', and \\'O3\\'].'''\r '''See details at https://nvidia.github.io/apex/amp.html'''\r ) , )\r parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.'''\t\t\t\t)\r parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank'''\t\t\t\t)\r parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed'''\t\t\t\t)\r\r parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.'''\t\t\t\t)\r parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.'''\t\t\t\t)\r __SCREAMING_SNAKE_CASE\t: Optional[int] =\t\t\t\t\t\tparser.parse_args()\r sanity_checks(snake_case\t\t\t\t)\r\r # ARGS #\r init_gpu_params(snake_case\t\t\t\t)\r set_seed(snake_case\t\t\t\t)\r if args.is_master:\r if os.path.exists(args.dump_path\t\t\t\t):\r if not args.force:\r raise ValueError(\r F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''\r ''' itUse `--force` if you want to overwrite it'''\t\t\t\t)\r else:\r shutil.rmtree(args.dump_path\t\t\t\t)\r\r if not os.path.exists(args.dump_path\t\t\t\t):\r os.makedirs(args.dump_path\t\t\t\t)\r logger.info(F'''Experiment will be dumped and logged in {args.dump_path}'''\t\t\t\t)\r\r # SAVE PARAMS #\r logger.info(F'''Param: {args}'''\t\t\t\t)\r with open(os.path.join(args.dump_path , '''parameters.json'''\t\t\t\t) , '''w'''\t\t\t\t) as f:\r json.dump(vars(snake_case\t\t\t\t) , snake_case , indent=4\t\t\t\t)\r git_log(args.dump_path\t\t\t\t)\r\r __SCREAMING_SNAKE_CASE,\t\t\t\t\t\t__SCREAMING_SNAKE_CASE,\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t: str =\t\t\t\t\t\tMODEL_CLASSES[args.student_type]\r __SCREAMING_SNAKE_CASE,\t\t\t\t\t\t__SCREAMING_SNAKE_CASE,\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t: Tuple =\t\t\t\t\t\tMODEL_CLASSES[args.teacher_type]\r\r # TOKENIZER #\r __SCREAMING_SNAKE_CASE\t: Optional[int] =\t\t\t\t\t\tteacher_tokenizer_class.from_pretrained(args.teacher_name\t\t\t\t)\r __SCREAMING_SNAKE_CASE\t: Optional[Any] =\t\t\t\t\t\t{}\r for tok_name, tok_symbol in tokenizer.special_tokens_map.items():\r __SCREAMING_SNAKE_CASE\t: Any 
=\t\t\t\t\t\ttokenizer.all_special_tokens.index(snake_case\t\t\t\t)\r __SCREAMING_SNAKE_CASE\t: List[Any] =\t\t\t\t\t\ttokenizer.all_special_ids[idx]\r logger.info(F'''Special tokens {special_tok_ids}'''\t\t\t\t)\r __SCREAMING_SNAKE_CASE\t: Any =\t\t\t\t\t\tspecial_tok_ids\r __SCREAMING_SNAKE_CASE\t: List[Any] =\t\t\t\t\t\ttokenizer.max_model_input_sizes[args.teacher_name]\r\r # DATA LOADER #\r logger.info(F'''Loading data from {args.data_file}'''\t\t\t\t)\r with open(args.data_file , '''rb'''\t\t\t\t) as fp:\r __SCREAMING_SNAKE_CASE\t: List[str] =\t\t\t\t\t\tpickle.load(snake_case\t\t\t\t)\r\r if args.mlm:\r logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)'''\t\t\t\t)\r with open(args.token_counts , '''rb'''\t\t\t\t) as fp:\r __SCREAMING_SNAKE_CASE\t: Optional[Any] =\t\t\t\t\t\tpickle.load(snake_case\t\t\t\t)\r\r __SCREAMING_SNAKE_CASE\t: List[Any] =\t\t\t\t\t\tnp.maximum(snake_case , 1\t\t\t\t) ** -args.mlm_smoothing\r for idx in special_tok_ids.values():\r __SCREAMING_SNAKE_CASE\t: Any =\t\t\t\t\t\t0.0 # do not predict special tokens\r __SCREAMING_SNAKE_CASE\t: Union[str, Any] =\t\t\t\t\t\ttorch.from_numpy(snake_case\t\t\t\t)\r else:\r __SCREAMING_SNAKE_CASE\t: Optional[int] =\t\t\t\t\t\tNone\r\r __SCREAMING_SNAKE_CASE\t: Optional[Any] =\t\t\t\t\t\tLmSeqsDataset(params=snake_case , data=snake_case\t\t\t\t)\r logger.info('''Data loader created.'''\t\t\t\t)\r\r # STUDENT #\r logger.info(F'''Loading student config from {args.student_config}'''\t\t\t\t)\r __SCREAMING_SNAKE_CASE\t: Optional[Any] =\t\t\t\t\t\tstudent_config_class.from_pretrained(args.student_config\t\t\t\t)\r __SCREAMING_SNAKE_CASE\t: Dict =\t\t\t\t\t\tTrue\r\r if args.student_pretrained_weights is not None:\r logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}'''\t\t\t\t)\r __SCREAMING_SNAKE_CASE\t: Optional[Any] =\t\t\t\t\t\tstudent_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case\t\t\t\t)\r else:\r __SCREAMING_SNAKE_CASE\t: str =\t\t\t\t\t\tstudent_model_class(snake_case\t\t\t\t)\r\r if args.n_gpu > 0:\r student.to(F'''cuda:{args.local_rank}'''\t\t\t\t)\r logger.info('''Student loaded.'''\t\t\t\t)\r\r # TEACHER #\r __SCREAMING_SNAKE_CASE\t: List[str] =\t\t\t\t\t\tteacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case\t\t\t\t)\r if args.n_gpu > 0:\r teacher.to(F'''cuda:{args.local_rank}'''\t\t\t\t)\r logger.info(F'''Teacher loaded from {args.teacher_name}.'''\t\t\t\t)\r\r # FREEZING #\r if args.freeze_pos_embs:\r freeze_pos_embeddings(snake_case , snake_case\t\t\t\t)\r if args.freeze_token_type_embds:\r freeze_token_type_embeddings(snake_case , snake_case\t\t\t\t)\r\r # SANITY CHECKS #\r assert student.config.vocab_size == teacher.config.vocab_size\r assert student.config.hidden_size == teacher.config.hidden_size\r assert student.config.max_position_embeddings == teacher.config.max_position_embeddings\r if args.mlm:\r assert token_probs.size(0\t\t\t\t) == stu_architecture_config.vocab_size\r\r # DISTILLER #\r torch.cuda.empty_cache()\r __SCREAMING_SNAKE_CASE\t: int =\t\t\t\t\t\tDistiller(\r params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case\t\t\t\t)\r distiller.train()\r logger.info('''Let\\'s go get some drinks.'''\t\t\t\t)\r\r\rif __name__ == \"__main__\":\r 
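# entry point: argument parsing, teacher/student loading and Distiller.train() all run inside the routine defined above\r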
main()\r"},"style_context_codestyle":{"kind":"number","value":303,"string":"303"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":875,"cells":{"code":{"kind":"string","value":"\n\n\n\n\n'''simple docstring'''\n\n\n\n\n\nfrom typing import TYPE_CHECKING\n\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available\n\n\n_SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t: Dict\t\t\t\t\t\t=\t\t\t{\n \"configuration_poolformer\": [\n \"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP\",\n \"PoolFormerConfig\",\n \"PoolFormerOnnxConfig\",\n ]\n}\n\ntry:\n\t\tif not is_vision_available():\n\t\t\t\traise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n\t\tpass\nelse:\n\t\t_SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t: int\t\t\t\t\t\t=\t\t\t[\"PoolFormerFeatureExtractor\"]\n\t\t_SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t: str\t\t\t\t\t\t=\t\t\t[\"PoolFormerImageProcessor\"]\n\ntry:\n\t\tif not is_torch_available():\n\t\t\t\traise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n\t\tpass\nelse:\n\t\t_SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t: List[str]\t\t\t\t\t\t=\t\t\t[\n\t\t \"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST\",\n\t\t \"PoolFormerForImageClassification\",\n\t\t \"PoolFormerModel\",\n\t\t \"PoolFormerPreTrainedModel\",\n\t\t]\n\n\nif TYPE_CHECKING:\n\t\tfrom .configuration_poolformer import (\n\t\t POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,\n\t\t PoolFormerConfig,\n\t\t PoolFormerOnnxConfig,\n\t\t)\n\n\t\ttry:\n\t\t\t\tif not is_vision_available():\n\t\t\t\t\t\traise OptionalDependencyNotAvailable()\n\t\texcept OptionalDependencyNotAvailable:\n\t\t\t\tpass\n\t\telse:\n\t\t\t\tfrom .feature_extraction_poolformer import PoolFormerFeatureExtractor\n\t\t\t\tfrom .image_processing_poolformer import PoolFormerImageProcessor\n\n\t\ttry:\n\t\t\t\tif not is_torch_available():\n\t\t\t\t\t\traise OptionalDependencyNotAvailable()\n\t\texcept OptionalDependencyNotAvailable:\n\t\t\t\tpass\n\t\telse:\n\t\t\t\tfrom .modeling_poolformer import (\n\t\t\t\t POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,\n\t\t\t\t PoolFormerForImageClassification,\n\t\t\t\t PoolFormerModel,\n\t\t\t\t PoolFormerPreTrainedModel,\n\t\t\t\t)\n\n\nelse:\n\t\timport sys\n\n\t\t_SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t: List[Any]\t\t\t\t\t\t=\t\t\t_LazyModule(__name__, globals()[\"__file__\"], _import_structure)\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":92,"string":"92"},"style_context":{"kind":"string","value":"\n\n\n\n\n'''simple docstring'''\n\n\n\n\n\nimport logging\nimport os\n\nfrom .state import PartialState\n\n\n\n\nclass \t\t\t_snake_case\t\t( logging.LoggerAdapter\t\t\t\t\t\t\t):\n\n\n\n\n\t\t\t@staticmethod\n\t\t\tdef lowerCAmelCase__ ( a__\t\t\t\t\t\t\t) -> Optional[Any]:\n\n\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\t\t\t\t\t\t\t\t\t\tsnake_case_ =\t\t\t\tPartialState()\n\t\t\t\t\t\t\t\t\t\treturn not main_process_only or (main_process_only and state.is_main_process)\n\n\n\n\n\t\t\tdef lowerCAmelCase__ ( self ,\t\t\t\t\t\t\ta__ ,\t\t\t\t\t\t\ta__ ,\t\t\t\t\t\t\t*a__ ,\t\t\t\t\t\t\t**a__\t\t\t\t\t\t\t) -> List[Any]:\n\n\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\t\t\t\t\t\t\t\t\t\tif PartialState._shared_state == {}:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise RuntimeError(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.\"\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\tsnake_case_ =\t\t\t\tkwargs.pop(\"main_process_only\" 
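# strip the accelerate-specific kwargs (main_process_only / in_order) before the record reaches the wrapped stdlib logger\n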
# ===== diffusers schedulers package `__init__` =====

from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )


try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
# ===== Lorentz transformation of a four-vector (special relativity) =====
# Function and variable names are recovered from the preserved internal call sites.

from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient: speed of light (m/s)
c = 299_792_458

# Symbolic four-vector components
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Ratio v/c, validated to lie in the physically sensible range."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - (v/c)^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost along the x-axis for the four-vector (ct, x, y, z)."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
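# --- Numeric sanity sketch (added; not in the original file): at v = 0.5c,
# beta is exactly 0.5 and gamma is 1/sqrt(1 - 0.25) ~= 1.1547.
assert abs(beta(0.5 * c) - 0.5) < 1e-12
assert abs(gamma(0.5 * c) - 1.1547005383792515) < 1e-9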
# ===== ViT-MSN model configuration (transformers) =====

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias


# ===== Lempel-Ziv-Welch decompression over bit strings =====
# Several assignment-target dictionary keys were destroyed by extraction; they
# are restored below following the well-known upstream implementation, so
# treat those exact keys as a reconstruction.

import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the file as bytes and return them as one long binary string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress data_bits with LZW and return the result as a string."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # When the index hits a power of two, widen every key by one leading bit.
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string to the file, padded out to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that the compressor prepends to the bit stream."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
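# --- Illustrative sketch (added; not in the original file): the byte-to-bit
# formatting used by read_file_binary above.
sample_bits = "".join(f"{byte:08b}" for byte in b"AB")
assert sample_bits == "0100000101000010"  # "A" == 0x41, "B" == 0x42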
# ===== Count islands in a boolean matrix (8-neighbour DFS) =====
# `is_safe` and `diffs` come from preserved call sites; the class name and
# `count_islands` were lost in extraction and are inferred placeholders.

class IslandGrid:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """A cell may be visited if it is inside the grid, unvisited, and land."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """Depth-first search over the 8 neighbours of cell (i, j)."""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
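# --- Usage sketch (added; class/method names as reconstructed above): two
# 8-connected clusters of 1s, so the counter returns 2.
sample_grid = [
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 1],
]
assert IslandGrid(3, 4, sample_grid).count_islands() == 2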
# ===== XOR cipher =====
# Method names are recovered from the usage comments at the end of the file.

from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt", key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt", key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt", key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt", key), key))

# if crypt.encrypt_file("test.txt", key):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if crypt.decrypt_file("encrypt.out", key):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
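# --- Round-trip sketch (added; not in the original file): XOR with the same
# key is its own inverse, so encrypt followed by decrypt is the identity.
cipher = XORCipher()
assert cipher.decrypt_string(cipher.encrypt_string("hallo welt", 67), 67) == "hallo welt"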
# ===== Max/average pooling over a square matrix =====
# Function and keyword names are confirmed by the preserved __main__ calls.

import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
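# --- Small numeric sketch (added; not in the original file): 2x2 windows with
# stride 2 over a 4x4 matrix produce a 2x2 output.
sample = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
print(maxpooling(sample, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(sample, size=2, stride=2))  # [[ 3.  5.] [11. 13.]] (averages truncated by int())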
# ===== transformers XLNet package `__init__` (lazy import structure) =====
# The `_import_structure` keys below are reconstructed from the standard lazy-import pattern.

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLNetForMultipleChoice", "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple", "XLNetForSequenceClassification", "XLNetForTokenClassification",
        "XLNetLMHeadModel", "XLNetModel", "XLNetPreTrainedModel", "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLNetForMultipleChoice", "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification", "TFXLNetForTokenClassification", "TFXLNetLMHeadModel",
        "TFXLNetMainLayer", "TFXLNetModel", "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ===== Depth-first topological sort over a small DAG =====

edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """DFS-based topological sort; returns the vertices in dependency order."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
# ===== GPT-NeoX-Japanese model tests (transformers test suite) =====
# Tester, class, and method names are recovered from preserved call sites;
# a few test-flag names are reconstructed assumptions and flagged below.

import unittest

from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel


class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    # The four flag names below are reconstructed assumptions; the extraction
    # reduced them all to the same placeholder.
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)


# ===== ImageGPT image-processor tests (transformers test suite) =====
# Class and test names are recovered from preserved call sites and the
# standard image-processing test layout.

import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)


# ===== transformers BEiT package `__init__` (lazy import structure) =====
# The `_import_structure` keys below are reconstructed from the standard lazy-import pattern.

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "BeitForImageClassification", "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation", "BeitModel", "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification", "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel", "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)


# ===== Project Euler problem 12: highly divisible triangular number =====

def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
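# --- Worked check (added; not in the original file): 28 = 2^2 * 7, so
# count_divisors(28) == (2 + 1) * (1 + 1) == 6, i.e. {1, 2, 4, 7, 14, 28}.
assert count_divisors(28) == 6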
# ===== Conditional DETR model configuration (transformers) =====
# Parameter names are recovered from the preserved assignment right-hand sides.

import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
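# (a usage sketch for this config follows the class definitions below)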
"""Conditional DETR model configuration"""

import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
                 encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6,
                 decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0,
                 is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
                 auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50",
                 use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2,
                 mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2,
                 bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
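# Minimal usage sketch, illustrative only (assumes a local transformers
# install). attribute_map makes `hidden_size` an alias for `d_model`:
# >>> cfg = ConditionalDetrConfig()
# >>> cfg.hidden_size == cfg.d_model == 256
# True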
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1,
                 pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
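# Usage sketch, illustrative only:
# >>> configuration = NezhaConfig()
# >>> configuration.model_type
# 'nezha'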
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy, rename and adapt a VisualBERT checkpoint to the transformers format."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024, "num_labels": 2}
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
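# Example invocation (illustrative: the script filename is assumed, and the
# checkpoint must be one of ACCEPTABLE_CHECKPOINTS available locally):
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_fine_tuned.th ./visual_bert_vqa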
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        # `sql` and `con` are consumed here rather than forwarded to pandas.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Write the dataset to SQL in batches, optionally with a multiprocessing pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written

        return written
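# Round-trip sketch (illustrative only; assumes an in-memory sqlite3
# connection and a tiny Dataset, both names chosen for the example):
#
# import sqlite3
# from datasets import Dataset
#
# ds = Dataset.from_dict({"text": ["a", "b"]})
# con = sqlite3.connect(":memory:")
# SqlDatasetWriter(ds, "my_table", con).write()                   # Arrow -> SQL
# out = SqlDatasetReader("SELECT * FROM my_table", con).read()    # SQL -> Dataset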
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt",
        "google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt",
        "google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt",
        "google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json",
        "google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json",
        "google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json",
        "google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json",
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}


class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always pad candidates to max_length so every example lines up.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
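# Sketch of batch_encode_candidates (illustrative checkpoint name): each
# example carries several candidate texts, and every candidate is padded to
# max_length so the result can be stacked per example.
# >>> tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# >>> batch = tokenizer.batch_encode_candidates(
# ...     [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
# ... )
# >>> batch.input_ids.shape  # (batch_size, num_candidates, max_length)
# torch.Size([1, 2, 10])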
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
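# Example CLI usage of the subcommand registered above (file paths are
# illustrative only):
#   transformers-cli run --task sentiment-analysis \
#       --input ./input.csv --output ./out.json --format csv --column text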
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
                 encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
                 encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024,
                 dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
                 classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
                 use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
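# Usage sketch, illustrative only; attribute_map aliases num_attention_heads:
# >>> configuration = MvpConfig()
# >>> configuration.num_attention_heads == configuration.encoder_attention_heads
# True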
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark dataset builder."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir

        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)

    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda x: _rename_shard(*x)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
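# Typical entry point (illustrative sketch): users normally reach this builder
# through `datasets.Dataset.from_spark`, which wires a pyspark DataFrame into
# the builder above and materializes it into the cache directory. The paths
# below are assumptions for the example.
#
# from datasets import Dataset
#
# dataset = Dataset.from_spark(df, cache_dir="/dbfs/cache")  # df: pyspark.sql.DataFrame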
# =============================================================================
# Tests for the StableDiffusionDiffEdit pipeline (diffusers test suite).
# =============================================================================
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array([0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"

        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array([0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
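# Condensed end-to-end DiffEdit recipe, distilled from the slow tests above
# (same calls, minus the assertions; nothing here goes beyond what the tests
# exercise):
#
#   pipe = StableDiffusionDiffEditPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
#   )
#   mask = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
#   latents = pipe.invert(prompt=src, image=img, inpaint_strength=0.7).latents
#   edited = pipe(prompt=tgt, mask_image=mask, image_latents=latents,
#                 negative_prompt=src, inpaint_strength=0.7).images[0]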
# =============================================================================
# GPT-J model configuration (transformers).
# =============================================================================
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
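# Minimal usage sketch for GPTJConfig (assumes the matching GPTJModel class
# from the same library, which this file does not define):
#
#   from transformers import GPTJConfig, GPTJModel
#
#   config = GPTJConfig(n_layer=2, n_head=4, n_embd=128, rotary_dim=16)  # tiny variant
#   model = GPTJModel(config)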
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
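# Hedged sketch of driving the ONNX config above; `generate_dummy_inputs` is
# defined in this file, but the tokenizer checkpoint and TensorType usage are
# assumptions about the surrounding library:
#
#   from transformers import AutoTokenizer, TensorType
#
#   onnx_config = GPTJOnnxConfig(GPTJConfig(), task="default", use_past=True)
#   tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   dummy = onnx_config.generate_dummy_inputs(
#       tok, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )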
# =============================================================================
# Project Euler 144: laser beam reflections inside the ellipse 4x^2 + y^2 = 100.
# =============================================================================
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal at (point_x, point_y), then of the reflected beam
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)
    x_plus = (-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
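# Quick invariant check: every point produced by next_point must stay on the
# ellipse 4x^2 + y^2 = 100 that the white-cell boundary describes.
_x, _y, _m = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
assert abs(4 * _x**2 + _y**2 - 100) < 1e-6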
# =============================================================================
# Manhattan distance between two points in n-dimensional space.
# =============================================================================
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = f"Expected a list of numbers as input, found {type(item).__name__}"
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()


# =============================================================================
# Project Euler 33: digit-cancelling fractions.
# =============================================================================
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
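# Worked example: 49/98 equals 4/8 after "cancelling" the shared digit 9,
# which is exactly the curious property Project Euler 33 asks about.
assert is_digit_cancelling(49, 98)
assert not is_digit_cancelling(30, 50)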
# =============================================================================
# Processor for M-CTC-T: wraps an MCTCTFeatureExtractor and a tokenizer
# (transformers, deprecated models).
# =============================================================================
import warnings
from contextlib import contextmanager

from ....processing_utils import ProcessorMixin


class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backwards compatibility with `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backwards compatibility with `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
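# Usage sketch for MCTCTProcessor (illustrative; the checkpoint name is the
# one the M-CTC-T release used and is an assumption here, as is the 16 kHz
# `waveform` input):
#
#   from transformers import MCTCTProcessor
#
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="the transcript").input_ids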
# =============================================================================
# Constant/histogram stretch of a grayscale image (OpenCV + matplotlib demo).
# =============================================================================
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)  # kept as in the source; `last % last` is always 0
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # note: kept as in the source; os.path.dirname is likely what was intended
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()


# =============================================================================
# TrOCR decoder configuration (transformers).
# =============================================================================
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )


# =============================================================================
# Project Euler 2: sum of the even Fibonacci numbers not exceeding n.
# =============================================================================
def solution(n: int = 4_000_000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total


if __name__ == "__main__":
    print(f"{solution() = }")
# =============================================================================
# Enigma machine emulation (rotors + reflector + plugboard).
# =============================================================================
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]


# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C",
    "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F",
    "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I",
    "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L",
    "M": "Z", "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1})")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    elif len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    elif pbstring == "":
        return {}

    # the source discarded the result of this call; assigning it is the intent
    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"'{i}' not in list of symbols")
        elif i in tmppbl:
            raise Exception(f"Duplicate symbol ({i})")
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')
        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # the dump lost which three rotors were picked here; any three distinct
    # rotors work, and (rotor2, rotor4, rotor8) matches the upstream script
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
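# Because the signal path is symmetric around the reflector, running enigma()
# twice with the same settings is the identity map on the supported alphabet:
assert enigma(enigma("HELLOWORLD", (1, 1, 1)), (1, 1, 1)) == "HELLOWORLD"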
# =============================================================================
# Tests for the IPNDM scheduler (diffusers test suite).
# =============================================================================
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2_540_529) < 10
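# Minimal straight-line version of the loop the tests above exercise
# (a sketch; `model` stands in for any epsilon-prediction network):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample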
# =============================================================================
# Arc length of a circle sector from its central angle (degrees) and radius.
# =============================================================================
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
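# Worked check: a 90-degree arc of a circle with radius 10 spans a quarter of
# the circumference 2*pi*r = 20*pi, i.e. 5*pi ~= 15.708.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-12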
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text


# ---------------------------------------------------------------------------


__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesised infix expression with Dijkstra's two-stack
    algorithm: one stack holds operands, the other holds operators, and every
    closing parenthesis reduces one operator and two operands to a result.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands, apply, push result
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
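# The same two-stack evaluation as a minimal standalone sketch, using plain
# Python lists in place of the Stack class (illustrative only; like the
# function above, it only handles single-digit operands):
import operator as op


def eval_two_stack(equation: str) -> float:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operands, pending = [], []
    for ch in equation:
        if ch.isdigit():
            operands.append(int(ch))  # RULE 1
        elif ch in operators:
            pending.append(ch)  # RULE 2
        elif ch == ")":
            right, left = operands.pop(), operands.pop()  # RULE 4
            operands.append(operators[pending.pop()](left, right))
    return operands[-1]  # RULE 5


# inner ')' reduces 2 / 2 -> 1.0, outer ')' reduces 3 - 1.0 -> 2.0
assert eval_two_stack("(3 - (2 / 2))") == 2.0


# ---------------------------------------------------------------------------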
[\"a\", \"c\"] )\r\n self.assertEqual(_A , [-3, -1] )\r\n\r\n\r\n\r\n\r\n\r\n def \t\tUpperCAmelCase(self\t\t\t\t: Optional[int] )\t\t\t\t\t\t->\t\t\t\t\t\tstr:\r\n # Stage names must be set\r\n with self.assertRaises(_A ):\r\n verify_out_features_out_indices([\"a\", \"b\"] , (0, 1) , _A )\r\n\r\n # Out features must be a list\r\n with self.assertRaises(_A ):\r\n verify_out_features_out_indices((\"a\", \"b\") , (0, 1) , [\"a\", \"b\"] )\r\n\r\n # Out features must be a subset of stage names\r\n with self.assertRaises(_A ):\r\n verify_out_features_out_indices([\"a\", \"b\"] , (0, 1) , [\"a\"] )\r\n\r\n # Out indices must be a list or tuple\r\n with self.assertRaises(_A ):\r\n verify_out_features_out_indices(_A , 0 , [\"a\", \"b\"] )\r\n\r\n # Out indices must be a subset of stage names\r\n with self.assertRaises(_A ):\r\n verify_out_features_out_indices(_A , (0, 1) , [\"a\"] )\r\n\r\n # Out features and out indices must be the same length\r\n with self.assertRaises(_A ):\r\n verify_out_features_out_indices([\"a\", \"b\"] , (0,) , [\"a\", \"b\", \"c\"] )\r\n\r\n # Out features should match out indices\r\n with self.assertRaises(_A ):\r\n verify_out_features_out_indices([\"a\", \"b\"] , (0, 2) , [\"a\", \"b\", \"c\"] )\r\n\r\n # Out features and out indices should be in order\r\n with self.assertRaises(_A ):\r\n verify_out_features_out_indices([\"b\", \"a\"] , (0, 1) , [\"a\", \"b\"] )\r\n\r\n # Check passes with valid inputs\r\n verify_out_features_out_indices([\"a\", \"b\", \"d\"] , (0, 1, -1) , [\"a\", \"b\", \"c\", \"d\"] )\r\n\r\n\r\n\r\n\r\n\r\n def \t\tUpperCAmelCase(self\t\t\t\t: List[str] )\t\t\t\t\t\t->\t\t\t\t\t\tstr:\r\n snake_case\t\t\t\t\t\t\t\t= BackboneMixin()\r\n\r\n snake_case\t\t\t\t\t\t\t\t= [\"a\", \"b\", \"c\"]\r\n snake_case\t\t\t\t\t\t\t\t= [\"a\", \"c\"]\r\n snake_case\t\t\t\t\t\t\t\t= [0, 2]\r\n\r\n # Check that the output features and indices are set correctly\r\n self.assertEqual(backbone.out_features , [\"a\", \"c\"] )\r\n self.assertEqual(backbone.out_indices , [0, 2] )\r\n\r\n # Check out features and indices are updated correctly\r\n snake_case\t\t\t\t\t\t\t\t= [\"a\", \"b\"]\r\n self.assertEqual(backbone.out_features , [\"a\", \"b\"] )\r\n self.assertEqual(backbone.out_indices , [0, 1] )\r\n\r\n snake_case\t\t\t\t\t\t\t\t= [-3, -1]\r\n self.assertEqual(backbone.out_features , [\"a\", \"c\"] )\r\n self.assertEqual(backbone.out_indices , [-3, -1] )\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":137,"string":"137"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":894,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\nfrom __future__ import annotations\r\n\r\nimport unittest\r\n\r\nfrom transformers import RoFormerConfig, is_tf_available\r\nfrom transformers.testing_utils import require_tf, slow\r\n\r\nfrom ...test_configuration_common import ConfigTester\r\nfrom ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask\r\nfrom ...test_pipeline_mixin import PipelineTesterMixin\r\n\r\n\r\nif is_tf_available():\r\n\t\t\t\t\t\t\timport tensorflow as tf\r\n\r\n\t\t\t\t\t\t\tfrom transformers import (\r\n\t\t\t\t\t\t\t TFRoFormerForCausalLM,\r\n\t\t\t\t\t\t\t TFRoFormerForMaskedLM,\r\n\t\t\t\t\t\t\t TFRoFormerForMultipleChoice,\r\n\t\t\t\t\t\t\t TFRoFormerForQuestionAnswering,\r\n\t\t\t\t\t\t\t TFRoFormerForSequenceClassification,\r\n\t\t\t\t\t\t\t TFRoFormerForTokenClassification,\r\n\t\t\t\t\t\t\t TFRoFormerModel,\r\n\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\tfrom 
from __future__ import annotations

import unittest

from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True,
        use_input_mask=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        # NOTE: the tester hard-codes every hyperparameter below instead of
        # reading the constructor arguments.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)


@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64 query and key tensors
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
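# What apply_rotary_position_embeddings computes, as a minimal standalone
# numpy sketch of the same per-pair rotation (shapes and the even/odd channel
# layout here are illustrative, not the exact tensor layout used by the TF
# implementation above):
import numpy as np


def rotate(x, sin, cos):
    # split channels into 2D pairs and rotate each pair by its position angle
    x1, x2 = x[..., 0::2], x[..., 1::2]
    rotated = np.stack([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
    return rotated.reshape(x.shape)


positions = np.arange(16)[:, None]                    # seq_len = 16
inv_freq = 1.0 / 10000 ** (np.arange(0, 64, 2) / 64)  # head_dim = 64
angles = positions * inv_freq[None, :]
q = np.random.randn(16, 64)
q_rot = rotate(q, np.sin(angles), np.cos(angles))
assert q_rot.shape == q.shape  # the rotation is norm- and shape-preserving


# ---------------------------------------------------------------------------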
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "é", ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                  ^ unk: 2 + 1 = 3                                      unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4,
            6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608,
            959, 1119, 57702, 136, 186, 47, 1098, 29367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {
            "input_ids": [
                [0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2],  # noqa: E501
                [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2] + [1] * 68,  # noqa: E501  # padded with <pad> (id 1)
                [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2] + [1] * 91,  # noqa: E501
            ],
            "attention_mask": [[1] * 106, [1] * 38 + [0] * 68, [1] * 15 + [0] * 91],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
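# Why the expected ids above are shifted by `fairseq_offset`: the original
# fairseq vocabulary reserves the leading ids for "<s>", "<pad>", "</s>" and
# "<unk>", so every raw sentencepiece id is moved up by one (hence the
# "unk: 2 + 1 = 3" notes in the test). A minimal sketch of that mapping:
fairseq_offset = 1
spm_ids = [285, 46, 10, 170, 382]  # raw sentencepiece ids for "This is a test"
hf_ids = [i + fairseq_offset for i in spm_ids]
assert hf_ids == [286, 47, 11, 171, 383]


# ---------------------------------------------------------------------------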
require_sentencepiece,\r require_tokenizers,\r require_torch,\r)\r\rfrom ...test_tokenization_common import TokenizerTesterMixin\r\r\r_UpperCAmelCase\t\t: Union[str, Any]\t\t\t\t\t\t =\tget_tests_dir(\"\"\"fixtures/test_sentencepiece.model\"\"\")\r\r\rif is_torch_available():\r from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right\r\r_UpperCAmelCase\t\t: Tuple\t\t\t\t\t\t =\t25_6047\r_UpperCAmelCase\t\t: Optional[Any]\t\t\t\t\t\t =\t25_6145\r\r\r\r\r\r\r\r@require_sentencepiece\r@require_tokenizers\rclass \t\t\tlowercase (\t__snake_case , unittest.TestCase ):\r __SCREAMING_SNAKE_CASE :\t\t\tTuple\t = NllbTokenizer\r __SCREAMING_SNAKE_CASE :\t\t\tAny\t = NllbTokenizerFast\r __SCREAMING_SNAKE_CASE :\t\t\tOptional[Any]\t = True\r __SCREAMING_SNAKE_CASE :\t\t\tTuple\t = True\r __SCREAMING_SNAKE_CASE :\t\t\tList[Any]\t = {}\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r super().setUp()\r\r # We have a SentencePiece fixture for testing\r snake_case_ = NllbTokenizer(lowerCamelCase_\t\t\t\t, keep_accents=lowerCamelCase_\t)\r tokenizer.save_pretrained(self.tmpdirname\t)\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r snake_case_ = NllbTokenizer(lowerCamelCase_\t\t\t\t, keep_accents=lowerCamelCase_\t)\r\r snake_case_ = tokenizer.tokenize('This is a test'\t)\r self.assertListEqual(lowerCamelCase_\t\t\t\t, ['▁This', '▁is', '▁a', '▁t', 'est']\t)\r\r self.assertListEqual(\r tokenizer.convert_tokens_to_ids(lowerCamelCase_\t)\t\t\t\t, [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]]\t\t\t\t, )\r\r snake_case_ = tokenizer.tokenize('I was born in 92000, and this is falsé.'\t)\r self.assertListEqual(\r lowerCamelCase_\t\t\t\t, [\r SPIECE_UNDERLINE + 'I',\r SPIECE_UNDERLINE + 'was',\r SPIECE_UNDERLINE + 'b',\r 'or',\r 'n',\r SPIECE_UNDERLINE + 'in',\r SPIECE_UNDERLINE + '',\r '9',\r '2',\r '0',\r '0',\r '0',\r ',',\r SPIECE_UNDERLINE + 'and',\r SPIECE_UNDERLINE + 'this',\r SPIECE_UNDERLINE + 'is',\r SPIECE_UNDERLINE + 'f',\r 'al',\r 's',\r 'é',\r '.',\r ]\t\t\t\t, )\r snake_case_ = tokenizer.convert_tokens_to_ids(lowerCamelCase_\t)\r self.assertListEqual(\r lowerCamelCase_\t\t\t\t, [\r value + tokenizer.fairseq_offset\r for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]\r ]\t\t\t\t, )\r\r snake_case_ = tokenizer.convert_ids_to_tokens(lowerCamelCase_\t)\r self.assertListEqual(\r lowerCamelCase_\t\t\t\t, [\r SPIECE_UNDERLINE + 'I',\r SPIECE_UNDERLINE + 'was',\r SPIECE_UNDERLINE + 'b',\r 'or',\r 'n',\r SPIECE_UNDERLINE + 'in',\r SPIECE_UNDERLINE + '',\r '',\r '2',\r '0',\r '0',\r '0',\r ',',\r SPIECE_UNDERLINE + 'and',\r SPIECE_UNDERLINE + 'this',\r SPIECE_UNDERLINE + 'is',\r SPIECE_UNDERLINE + 'f',\r 'al',\r 's',\r '',\r '.',\r ]\t\t\t\t, )\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r snake_case_ = (self.rust_tokenizer_class, \"\"\"hf-internal-testing/tiny-random-nllb\"\"\", {})\r for tokenizer, pretrained_name, kwargs in self.tokenizers_list:\r with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''\t):\r snake_case_ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_\t\t\t\t, **lowerCamelCase_\t)\r snake_case_ = self.tokenizer_class.from_pretrained(lowerCamelCase_\t\t\t\t, **lowerCamelCase_\t)\r\r snake_case_ = tempfile.mkdtemp()\r\r snake_case_ = tokenizer_r.save_pretrained(lowerCamelCase_\t)\r snake_case_ = tokenizer_p.save_pretrained(lowerCamelCase_\t)\r\r # Checks it save with the same files + the tokenizer.json file for the fast one\r self.assertTrue(any('tokenizer.json' in f for f in 
tokenizer_r_files\t)\t)\r snake_case_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f\t)\r self.assertSequenceEqual(lowerCamelCase_\t\t\t\t, lowerCamelCase_\t)\r\r # Checks everything loads correctly in the same way\r snake_case_ = tokenizer_r.from_pretrained(lowerCamelCase_\t)\r snake_case_ = tokenizer_p.from_pretrained(lowerCamelCase_\t)\r\r # Check special tokens are set accordingly on Rust and Python\r for key in tokenizer_pp.special_tokens_map:\r self.assertTrue(hasattr(lowerCamelCase_\t\t\t\t, lowerCamelCase_\t)\t)\r\r shutil.rmtree(lowerCamelCase_\t)\r\r # Save tokenizer rust, legacy_format=True\r snake_case_ = tempfile.mkdtemp()\r\r snake_case_ = tokenizer_r.save_pretrained(lowerCamelCase_\t\t\t\t, legacy_format=lowerCamelCase_\t)\r snake_case_ = tokenizer_p.save_pretrained(lowerCamelCase_\t)\r\r # Checks it save with the same files\r self.assertSequenceEqual(lowerCamelCase_\t\t\t\t, lowerCamelCase_\t)\r\r # Checks everything loads correctly in the same way\r snake_case_ = tokenizer_r.from_pretrained(lowerCamelCase_\t)\r snake_case_ = tokenizer_p.from_pretrained(lowerCamelCase_\t)\r\r # Check special tokens are set accordingly on Rust and Python\r for key in tokenizer_pp.special_tokens_map:\r self.assertTrue(hasattr(lowerCamelCase_\t\t\t\t, lowerCamelCase_\t)\t)\r\r shutil.rmtree(lowerCamelCase_\t)\r\r # Save tokenizer rust, legacy_format=False\r snake_case_ = tempfile.mkdtemp()\r\r snake_case_ = tokenizer_r.save_pretrained(lowerCamelCase_\t\t\t\t, legacy_format=lowerCamelCase_\t)\r snake_case_ = tokenizer_p.save_pretrained(lowerCamelCase_\t)\r\r # Checks it saved the tokenizer.json file\r self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files\t)\t)\r\r # Checks everything loads correctly in the same way\r snake_case_ = tokenizer_r.from_pretrained(lowerCamelCase_\t)\r snake_case_ = tokenizer_p.from_pretrained(lowerCamelCase_\t)\r\r # Check special tokens are set accordingly on Rust and Python\r for key in tokenizer_pp.special_tokens_map:\r self.assertTrue(hasattr(lowerCamelCase_\t\t\t\t, lowerCamelCase_\t)\t)\r\r shutil.rmtree(lowerCamelCase_\t)\r\r\r\r\r @require_torch\r def a\t\t\t\t\t\t\t( self\t):\r if not self.test_seqaseq:\r return\r\r snake_case_ = self.get_tokenizers()\r for tokenizer in tokenizers:\r with self.subTest(F'''{tokenizer.__class__.__name__}'''\t):\r # Longer text that will definitely require truncation.\r snake_case_ = [\r \"\"\" UN Chief Says There Is No Military Solution in Syria\"\"\",\r \"\"\" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for\"\"\"\r \"\"\" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons\"\"\"\r \"\"\" will only worsen the violence and misery for millions of people.\"\"\",\r ]\r snake_case_ = [\r \"\"\"Şeful ONU declară că nu există o soluţie militară în Siria\"\"\",\r \"\"\"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al\"\"\"\r \"\"\" Rusiei pentru Siria este că \\\"nu există o soluţie militară\\\" la conflictul de aproape cinci ani şi\"\"\"\r \"\"\" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.\"\"\",\r ]\r try:\r snake_case_ = tokenizer.prepare_seqaseq_batch(\r src_texts=lowerCamelCase_\t\t\t\t, tgt_texts=lowerCamelCase_\t\t\t\t, max_length=3\t\t\t\t, max_target_length=10\t\t\t\t, return_tensors='pt'\t\t\t\t, src_lang='eng_Latn'\t\t\t\t, tgt_lang='ron_Latn'\t\t\t\t, )\r except NotImplementedError:\r return\r 
self.assertEqual(batch.input_ids.shape[1]\t\t\t\t, 3\t)\r self.assertEqual(batch.labels.shape[1]\t\t\t\t, 10\t)\r # max_target_length will default to max_length if not specified\r snake_case_ = tokenizer.prepare_seqaseq_batch(\r lowerCamelCase_\t\t\t\t, tgt_texts=lowerCamelCase_\t\t\t\t, max_length=3\t\t\t\t, return_tensors='pt'\t)\r self.assertEqual(batch.input_ids.shape[1]\t\t\t\t, 3\t)\r self.assertEqual(batch.labels.shape[1]\t\t\t\t, 3\t)\r\r snake_case_ = tokenizer.prepare_seqaseq_batch(\r src_texts=lowerCamelCase_\t\t\t\t, max_length=3\t\t\t\t, max_target_length=10\t\t\t\t, return_tensors='pt'\t)\r self.assertEqual(batch_encoder_only.input_ids.shape[1]\t\t\t\t, 3\t)\r self.assertEqual(batch_encoder_only.attention_mask.shape[1]\t\t\t\t, 3\t)\r self.assertNotIn('decoder_input_ids'\t\t\t\t, lowerCamelCase_\t)\r\r\r\r\r @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.'\t)\r def a\t\t\t\t\t\t\t( self\t):\r pass\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r for tokenizer, pretrained_name, kwargs in self.tokenizers_list:\r with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''\t):\r snake_case_ = [AddedToken(''\t\t\t\t, lstrip=lowerCamelCase_\t)]\r\r snake_case_ = self.rust_tokenizer_class.from_pretrained(\r lowerCamelCase_\t\t\t\t, additional_special_tokens=lowerCamelCase_\t\t\t\t, **lowerCamelCase_\t)\r snake_case_ = tokenizer_r.encode('Hey this is a token'\t)\r\r snake_case_ = tokenizer_r.encode(''\t\t\t\t, add_special_tokens=lowerCamelCase_\t)[0]\r\r self.assertTrue(special_token_id in r_output\t)\r\r if self.test_slow_tokenizer:\r snake_case_ = self.rust_tokenizer_class.from_pretrained(\r lowerCamelCase_\t\t\t\t, additional_special_tokens=lowerCamelCase_\t\t\t\t, **lowerCamelCase_\t\t\t\t, )\r snake_case_ = self.tokenizer_class.from_pretrained(\r lowerCamelCase_\t\t\t\t, additional_special_tokens=lowerCamelCase_\t\t\t\t, **lowerCamelCase_\t)\r\r snake_case_ = tokenizer_p.encode('Hey this is a token'\t)\r\r snake_case_ = tokenizer_cr.encode('Hey this is a token'\t)\r\r self.assertEqual(lowerCamelCase_\t\t\t\t, lowerCamelCase_\t)\r self.assertEqual(lowerCamelCase_\t\t\t\t, lowerCamelCase_\t)\r self.assertTrue(special_token_id in p_output\t)\r self.assertTrue(special_token_id in cr_output\t)\r\r\r\r\r\r\r\r@require_torch\r@require_sentencepiece\r@require_tokenizers\rclass \t\t\tlowercase (\tunittest.TestCase ):\r __SCREAMING_SNAKE_CASE :\t\t\tTuple\t = \"facebook/nllb-200-distilled-600M\"\r __SCREAMING_SNAKE_CASE :\t\t\tTuple\t = [\r \" UN Chief Says There Is No Military Solution in Syria\",\r \" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \\\"there is no military solution\\\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.\",\r ]\r __SCREAMING_SNAKE_CASE :\t\t\tList[Any]\t = [\r \"Şeful ONU declară că nu există o soluţie militară în Siria\",\r \"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei\"\r \" pentru Siria este că \\\"nu există o soluţie militară\\\" la conflictul de aproape cinci ani şi că noi arme nu vor\"\r \" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.\",\r ]\r __SCREAMING_SNAKE_CASE :\t\t\tList[str]\t = [\r 256_047,\r 16_297,\r 134_408,\r 8_165,\r 248_066,\r 14_734,\r 950,\r 1_135,\r 105_721,\r 3_573,\r 83,\r 27_352,\r 108,\r 49_486,\r 2,\r ]\r\r\r\r\r @classmethod\r def a\t\t\t\t\t\t\t( cls\t):\r snake_case_ = 
NllbTokenizer.from_pretrained(\r cls.checkpoint_name\t\t\t\t, src_lang='eng_Latn'\t\t\t\t, tgt_lang='ron_Latn'\t)\r snake_case_ = 1\r return cls\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab']\t\t\t\t, 25_6001\t)\r self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn']\t\t\t\t, 25_6002\t)\r self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn']\t\t\t\t, 25_6057\t)\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r snake_case_ = self.tokenizer.batch_encode_plus(self.src_text\t).input_ids[0]\r self.assertListEqual(self.expected_src_tokens\t\t\t\t, lowerCamelCase_\t)\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r self.assertIn(lowerCamelCase_\t\t\t\t, self.tokenizer.all_special_ids\t)\r # fmt: off\r snake_case_ = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]\r # fmt: on\r\r snake_case_ = self.tokenizer.decode(lowerCamelCase_\t\t\t\t, skip_special_tokens=lowerCamelCase_\t)\r snake_case_ = self.tokenizer.decode(generated_ids[1:]\t\t\t\t, skip_special_tokens=lowerCamelCase_\t)\r self.assertEqual(lowerCamelCase_\t\t\t\t, lowerCamelCase_\t)\r self.assertNotIn(self.tokenizer.eos_token\t\t\t\t, lowerCamelCase_\t)\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r snake_case_ = [\"\"\"this is gunna be a long sentence \"\"\" * 20]\r assert isinstance(src_text[0]\t\t\t\t, lowerCamelCase_\t)\r snake_case_ = 10\r snake_case_ = self.tokenizer(lowerCamelCase_\t\t\t\t, max_length=lowerCamelCase_\t\t\t\t, truncation=lowerCamelCase_\t).input_ids[0]\r self.assertEqual(ids[-1]\t\t\t\t, 2\t)\r self.assertEqual(ids[0]\t\t\t\t, lowerCamelCase_\t)\r self.assertEqual(len(lowerCamelCase_\t)\t\t\t\t, lowerCamelCase_\t)\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['', 'ar_AR']\t)\t\t\t\t, [25_6203, 3]\t)\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r snake_case_ = tempfile.mkdtemp()\r snake_case_ = self.tokenizer.fairseq_tokens_to_ids\r self.tokenizer.save_pretrained(lowerCamelCase_\t)\r snake_case_ = NllbTokenizer.from_pretrained(lowerCamelCase_\t)\r self.assertDictEqual(new_tok.fairseq_tokens_to_ids\t\t\t\t, lowerCamelCase_\t)\r\r\r\r\r @require_torch\r def a\t\t\t\t\t\t\t( self\t):\r snake_case_ = self.tokenizer(\r self.src_text\t\t\t\t, text_target=self.tgt_text\t\t\t\t, padding=lowerCamelCase_\t\t\t\t, truncation=lowerCamelCase_\t\t\t\t, max_length=len(self.expected_src_tokens\t)\t\t\t\t, return_tensors='pt'\t\t\t\t, )\r snake_case_ = shift_tokens_right(\r batch['labels']\t\t\t\t, self.tokenizer.pad_token_id\t\t\t\t, self.tokenizer.lang_code_to_id['ron_Latn']\t)\r\r self.assertIsInstance(lowerCamelCase_\t\t\t\t, lowerCamelCase_\t)\r\r self.assertEqual((2, 15)\t\t\t\t, batch.input_ids.shape\t)\r self.assertEqual((2, 15)\t\t\t\t, batch.attention_mask.shape\t)\r snake_case_ = batch.input_ids.tolist()[0]\r self.assertListEqual(self.expected_src_tokens\t\t\t\t, lowerCamelCase_\t)\r self.assertEqual(lowerCamelCase_\t\t\t\t, batch.decoder_input_ids[0, 0]\t) # EOS\r # Test that special tokens are reset\r self.assertEqual(self.tokenizer.prefix_tokens\t\t\t\t, [EN_CODE]\t)\r self.assertEqual(self.tokenizer.suffix_tokens\t\t\t\t, [self.tokenizer.eos_token_id]\t)\r\r\r\r\r def a\t\t\t\t\t\t\t( self\t):\r snake_case_ = self.tokenizer(self.src_text\t\t\t\t, padding=lowerCamelCase_\t\t\t\t, truncation=lowerCamelCase_\t\t\t\t, max_length=3\t\t\t\t, return_tensors='pt'\t)\r snake_case_ = self.tokenizer(\r text_target=self.tgt_text\t\t\t\t, 
padding=lowerCamelCase_\t\t\t\t, truncation=lowerCamelCase_\t\t\t\t, max_length=10\t\t\t\t, return_tensors='pt'\t)\r snake_case_ = targets[\"\"\"input_ids\"\"\"]\r snake_case_ = shift_tokens_right(\r lowerCamelCase_\t\t\t\t, self.tokenizer.pad_token_id\t\t\t\t, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang]\t\t\t\t, )\r\r self.assertEqual(batch.input_ids.shape[1]\t\t\t\t, 3\t)\r self.assertEqual(batch.decoder_input_ids.shape[1]\t\t\t\t, 10\t)\r\r\r\r\r @require_torch\r def a\t\t\t\t\t\t\t( self\t):\r snake_case_ = self.tokenizer._build_translation_inputs(\r 'A test'\t\t\t\t, return_tensors='pt'\t\t\t\t, src_lang='eng_Latn'\t\t\t\t, tgt_lang='fra_Latn'\t)\r\r self.assertEqual(\r nested_simplify(lowerCamelCase_\t)\t\t\t\t, {\r # A, test, EOS, en_XX\r 'input_ids': [[25_6047, 70, 7356, 2]],\r 'attention_mask': [[1, 1, 1, 1]],\r # ar_AR\r 'forced_bos_token_id': 25_6057,\r }\t\t\t\t, )\r\r\r\r\r @require_torch\r def a\t\t\t\t\t\t\t( self\t):\r snake_case_ = True\r snake_case_ = self.tokenizer(\r 'UN Chief says there is no military solution in Syria'\t\t\t\t, src_lang='eng_Latn'\t\t\t\t, tgt_lang='fra_Latn'\t)\r self.assertEqual(\r inputs.input_ids\t\t\t\t, [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047]\t)\r\r snake_case_ = False\r snake_case_ = self.tokenizer(\r 'UN Chief says there is no military solution in Syria'\t\t\t\t, src_lang='eng_Latn'\t\t\t\t, tgt_lang='fra_Latn'\t)\r self.assertEqual(\r inputs.input_ids\t\t\t\t, [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2]\t)\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":365,"string":"365"},"style_context":{"kind":"string","value":"\r\rfrom typing import TYPE_CHECKING\r\rfrom ...utils import (\r OptionalDependencyNotAvailable,\r _LazyModule,\r is_tf_available,\r is_torch_available,\r is_vision_available,\r)\r\r\r_UpperCAmelCase\t\t: Dict\t\t\t\t\t\t =\t{\r \"\"\"configuration_blip\"\"\": [\r \"\"\"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP\"\"\",\r \"\"\"BlipConfig\"\"\",\r \"\"\"BlipTextConfig\"\"\",\r \"\"\"BlipVisionConfig\"\"\",\r ],\r \"\"\"processing_blip\"\"\": [\"\"\"BlipProcessor\"\"\"],\r}\r\rtry:\r\t\t\t\t\tif not is_vision_available():\r\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\rexcept OptionalDependencyNotAvailable:\r\t\t\t\t\tpass\relse:\r\t\t\t\t\t_UpperCAmelCase\t\t: Optional[int]\t\t\t\t\t\t =\t[\"\"\"BlipImageProcessor\"\"\"]\r\r\rtry:\r\t\t\t\t\tif not is_torch_available():\r\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\rexcept OptionalDependencyNotAvailable:\r\t\t\t\t\tpass\relse:\r\t\t\t\t\t_UpperCAmelCase\t\t: Any\t\t\t\t\t\t =\t[\r\t\t\t\t\t \"\"\"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST\"\"\",\r\t\t\t\t\t \"\"\"BlipModel\"\"\",\r\t\t\t\t\t \"\"\"BlipPreTrainedModel\"\"\",\r\t\t\t\t\t \"\"\"BlipForConditionalGeneration\"\"\",\r\t\t\t\t\t \"\"\"BlipForQuestionAnswering\"\"\",\r\t\t\t\t\t \"\"\"BlipVisionModel\"\"\",\r\t\t\t\t\t \"\"\"BlipTextModel\"\"\",\r\t\t\t\t\t \"\"\"BlipForImageTextRetrieval\"\"\",\r\t\t\t\t\t]\r\rtry:\r\t\t\t\t\tif not is_tf_available():\r\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\rexcept OptionalDependencyNotAvailable:\r\t\t\t\t\tpass\relse:\r\t\t\t\t\t_UpperCAmelCase\t\t: Any\t\t\t\t\t\t =\t[\r\t\t\t\t\t \"\"\"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST\"\"\",\r\t\t\t\t\t \"\"\"TFBlipModel\"\"\",\r\t\t\t\t\t \"\"\"TFBlipPreTrainedModel\"\"\",\r\t\t\t\t\t \"\"\"TFBlipForConditionalGeneration\"\"\",\r\t\t\t\t\t \"\"\"TFBlipForQuestionAnswering\"\"\",\r\t\t\t\t\t 
\"\"\"TFBlipVisionModel\"\"\",\r\t\t\t\t\t \"\"\"TFBlipTextModel\"\"\",\r\t\t\t\t\t \"\"\"TFBlipForImageTextRetrieval\"\"\",\r\t\t\t\t\t]\r\rif TYPE_CHECKING:\r\t\t\t\t\tfrom .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig\r\t\t\t\t\tfrom .processing_blip import BlipProcessor\r\r\t\t\t\t\ttry:\r\t\t\t\t\t\t\t\t\t\tif not is_vision_available():\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\t\t\t\t\t\t\t\t\t\tpass\r\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\tfrom .image_processing_blip import BlipImageProcessor\r\r\t\t\t\t\ttry:\r\t\t\t\t\t\t\t\t\t\tif not is_torch_available():\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\t\t\t\t\t\t\t\t\t\tpass\r\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\tfrom .modeling_blip import (\r\t\t\t\t\t\t\t\t\t\t BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,\r\t\t\t\t\t\t\t\t\t\t BlipForConditionalGeneration,\r\t\t\t\t\t\t\t\t\t\t BlipForImageTextRetrieval,\r\t\t\t\t\t\t\t\t\t\t BlipForQuestionAnswering,\r\t\t\t\t\t\t\t\t\t\t BlipModel,\r\t\t\t\t\t\t\t\t\t\t BlipPreTrainedModel,\r\t\t\t\t\t\t\t\t\t\t BlipTextModel,\r\t\t\t\t\t\t\t\t\t\t BlipVisionModel,\r\t\t\t\t\t\t\t\t\t\t)\r\r\t\t\t\t\ttry:\r\t\t\t\t\t\t\t\t\t\tif not is_tf_available():\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\traise OptionalDependencyNotAvailable()\r\t\t\t\t\texcept OptionalDependencyNotAvailable:\r\t\t\t\t\t\t\t\t\t\tpass\r\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\tfrom .modeling_tf_blip import (\r\t\t\t\t\t\t\t\t\t\t TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,\r\t\t\t\t\t\t\t\t\t\t TFBlipForConditionalGeneration,\r\t\t\t\t\t\t\t\t\t\t TFBlipForImageTextRetrieval,\r\t\t\t\t\t\t\t\t\t\t TFBlipForQuestionAnswering,\r\t\t\t\t\t\t\t\t\t\t TFBlipModel,\r\t\t\t\t\t\t\t\t\t\t TFBlipPreTrainedModel,\r\t\t\t\t\t\t\t\t\t\t TFBlipTextModel,\r\t\t\t\t\t\t\t\t\t\t TFBlipVisionModel,\r\t\t\t\t\t\t\t\t\t\t)\r\relse:\r\t\t\t\t\timport sys\r\r\t\t\t\t\t_UpperCAmelCase\t\t: Optional[int]\t\t\t\t\t\t =\t_LazyModule(__name__, globals()[\"\"\"__file__\"\"\"], _import_structure, module_spec=__spec__)\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":200,"string":"200"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":896,"cells":{"code":{"kind":"string","value":"\n\n\n\n\n\nclass A__ :\n\n\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n def __init__(\t\t\t\t\tself\t\t\t\t\t, lowercase)\t\t\t->\t\t\t\t\t\tNone:\n\n\n\n\n\n\n\n '''simple docstring'''\n\n a__\t\t\t\t\t\t:\tOptional[Any] =\t\t\t\tlen(lowercase)\n a__\t\t\t\t\t\t:\tTuple =\t\t\t\t[0] * len_array\n\n if len_array > 0:\n a__\t\t\t\t\t\t:\tList[Any] =\t\t\t\tarray[0]\n\n for i in range(1\t\t\t\t\t, lowercase):\n a__\t\t\t\t\t\t:\tList[str] =\t\t\t\tself.prefix_sum[i - 1] + array[i]\n\n def \t\t\t\t\t\t\t__lowercase\t\t(\t\t\t\t\tself\t\t\t\t\t, lowercase\t\t\t\t\t, lowercase)\t\t\t->\t\t\t\t\t\tint:\n\n\n\n\n\n\n\n '''simple docstring'''\n\n if start == 0:\n return self.prefix_sum[end]\n\n return self.prefix_sum[end] - self.prefix_sum[start - 1]\n\n def \t\t\t\t\t\t\t__lowercase\t\t(\t\t\t\t\tself\t\t\t\t\t, lowercase)\t\t\t->\t\t\t\t\t\tbool:\n\n\n\n\n\n\n\n '''simple docstring'''\n\n a__\t\t\t\t\t\t:\tUnion[str, Any] =\t\t\t\t{0}\n for sum_item in self.prefix_sum:\n if sum_item - target_sum in sums:\n return True\n\n sums.add(lowercase)\n\n return False\n\n\nif __name__ == \"__main__\":\n import doctest\n\n 
doctest.testmod()\n\n\n"},"code_codestyle":{"kind":"number","value":99,"string":"99"},"style_context":{"kind":"string","value":"\n\n\n\n\n\nfrom collections.abc import Callable\nfrom math import pi, sqrt\nfrom random import uniform\nfrom statistics import mean\n\n\n\ndef A_\t(\t\tA__\t\t\t\t) ->\t\tTuple:\n\n # A local function to see if a dot lands in the circle.\n def is_in_circle(A__\t\t, A__\t\t\t\t) -> bool:\n a__\t\t\t\t\t\t:\tList[str] =\t\t\t\tsqrt((x**2) + (y**2)\t\t\t\t)\n # Our circle has a radius of 1, so a distance\n # greater than 1 would land outside the circle.\n return distance_from_centre <= 1\n\n # The proportion of guesses that landed in the circle\n a__\t\t\t\t\t\t:\tList[str] =\t\t\t\tmean(\n int(is_in_circle(uniform(-1.0\t\t, 1.0\t\t\t\t)\t\t, uniform(-1.0\t\t, 1.0\t\t\t\t)\t\t\t\t)\t\t\t\t)\n for _ in range(A__\t\t\t\t)\t\t\t\t)\n # The ratio of the area for circle to square is pi/4.\n a__\t\t\t\t\t\t:\tOptional[Any] =\t\t\t\tproportion * 4\n print(F'The estimated value of pi is {pi_estimate}'\t\t\t\t)\n print(F'The numpy value of pi is {pi}'\t\t\t\t)\n print(F'The total error is {abs(pi - pi_estimate\t\t\t\t)}'\t\t\t\t)\n\n\n\ndef A_\t(\t\tA__\t\t, A__\t\t, A__ = 0.0\t\t, A__ = 1.0\t\t, ) ->\t\tfloat:\n return mean(\n function_to_integrate(uniform(A__\t\t, A__\t\t\t\t)\t\t\t\t) for _ in range(A__\t\t\t\t)\t\t\t\t) * (max_value - min_value)\n\n\n\ndef A_\t(\t\tA__\t\t, A__ = 0.0\t\t, A__ = 1.0\t\t\t\t) ->\t\tNone:\n\n def identity_function(A__\t\t\t\t) -> float:\n return x\n\n a__\t\t\t\t\t\t:\tList[Any] =\t\t\t\tarea_under_curve_estimator(\n A__\t\t, A__\t\t, A__\t\t, A__\t\t\t\t)\n a__\t\t\t\t\t\t:\tUnion[str, Any] =\t\t\t\t(max_value * max_value - min_value * min_value) / 2\n\n print('******************'\t\t\t\t)\n print(F'Estimating area under y=x where x varies from {min_value} to {max_value}'\t\t\t\t)\n print(F'Estimated value is {estimated_value}'\t\t\t\t)\n print(F'Expected value is {expected_value}'\t\t\t\t)\n print(F'Total error is {abs(estimated_value - expected_value\t\t\t\t)}'\t\t\t\t)\n print('******************'\t\t\t\t)\n\n\n\ndef A_\t(\t\tA__\t\t\t\t) ->\t\tNone:\n\n def function_to_integrate(A__\t\t\t\t) -> float:\n return sqrt(4.0 - x * x\t\t\t\t)\n\n a__\t\t\t\t\t\t:\tDict =\t\t\t\tarea_under_curve_estimator(\n A__\t\t, A__\t\t, 0.0\t\t, 2.0\t\t\t\t)\n\n print('******************'\t\t\t\t)\n print('Estimating pi using area_under_curve_estimator'\t\t\t\t)\n print(F'Estimated value is {estimated_value}'\t\t\t\t)\n print(F'Expected value is {pi}'\t\t\t\t)\n print(F'Total error is {abs(estimated_value - pi\t\t\t\t)}'\t\t\t\t)\n print('******************'\t\t\t\t)\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n\n\n"},"style_context_codestyle":{"kind":"number","value":99,"string":"99"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":897,"cells":{"code":{"kind":"string","value":"\n\n\n\n\n\nfrom typing import TYPE_CHECKING\n\nfrom ...utils import (\n OptionalDependencyNotAvailable,\n _LazyModule,\n is_tokenizers_available,\n is_torch_available,\n is_vision_available,\n)\n\n\n_UpperCAmelCase :\tint = {\n \"\"\"configuration_layoutlmv2\"\"\": [\"\"\"LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP\"\"\", \"\"\"LayoutLMv2Config\"\"\"],\n \"\"\"processing_layoutlmv2\"\"\": [\"\"\"LayoutLMv2Processor\"\"\"],\n \"\"\"tokenization_layoutlmv2\"\"\": [\"\"\"LayoutLMv2Tokenizer\"\"\"],\n}\n\ntry:\n if not is_tokenizers_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n 
_UpperCAmelCase :\tOptional[int] = [\"\"\"LayoutLMv2TokenizerFast\"\"\"]\n\ntry:\n if not is_vision_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n _UpperCAmelCase :\tint = [\"\"\"LayoutLMv2FeatureExtractor\"\"\"]\n _UpperCAmelCase :\tAny = [\"\"\"LayoutLMv2ImageProcessor\"\"\"]\n\ntry:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n _UpperCAmelCase :\tList[Any] = [\n \"\"\"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST\"\"\",\n \"\"\"LayoutLMv2ForQuestionAnswering\"\"\",\n \"\"\"LayoutLMv2ForSequenceClassification\"\"\",\n \"\"\"LayoutLMv2ForTokenClassification\"\"\",\n \"\"\"LayoutLMv2Layer\"\"\",\n \"\"\"LayoutLMv2Model\"\"\",\n \"\"\"LayoutLMv2PreTrainedModel\"\"\",\n ]\n\nif TYPE_CHECKING:\n from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig\n from .processing_layoutlmva import LayoutLMvaProcessor\n from .tokenization_layoutlmva import LayoutLMvaTokenizer\n\n try:\n if not is_tokenizers_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast\n\n try:\n if not is_vision_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor\n\n try:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .modeling_layoutlmva import (\n LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,\n LayoutLMvaForQuestionAnswering,\n LayoutLMvaForSequenceClassification,\n LayoutLMvaForTokenClassification,\n LayoutLMvaLayer,\n LayoutLMvaModel,\n LayoutLMvaPreTrainedModel,\n )\nelse:\n import sys\n\n _UpperCAmelCase :\tDict = _LazyModule(__name__, globals()[\"\"\"__file__\"\"\"], _import_structure, module_spec=__spec__)"},"code_codestyle":{"kind":"number","value":365,"string":"365"},"style_context":{"kind":"string","value":"\n\n\n\n\n\nfrom collections import deque\n\nclass lowerCAmelCase\t\t\t\t\t\t\t:\n def __init__(\t\t\t\tself :\t\t\t\tstr ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tstr ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tint ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tint ) -> None:\n lowerCamelCase__ :\t\t\t\tOptional[int] \t\t\t=\t\tprocess_name # process name\n lowerCamelCase__ :\t\t\t\tOptional[int] \t\t\t=\t\tarrival_time # arrival time of the process\n # completion time of finished process or last interrupted time\n lowerCamelCase__ :\t\t\t\tstr \t\t\t=\t\tarrival_time\n lowerCamelCase__ :\t\t\t\tList[Any] \t\t\t=\t\tburst_time # remaining burst time\n lowerCamelCase__ :\t\t\t\tAny \t\t\t=\t\t0 # total time of the process wait in ready queue\n lowerCamelCase__ :\t\t\t\tTuple \t\t\t=\t\t0 # time from arrival time to completion time\n\n\nclass lowerCAmelCase\t\t\t\t\t\t\t:\n def __init__(\t\t\t\tself :\t\t\t\tList[str] ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tint ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tlist[int] ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tdeque[Process] ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tint ,\t\t\t\t\t) -> None:\n # total number of mlfq's queues\n lowerCamelCase__ :\t\t\t\tOptional[int] \t\t\t=\t\tnumber_of_queues\n # time slice of queues that round robin algorithm applied\n lowerCamelCase__ :\t\t\t\tList[str] \t\t\t=\t\ttime_slices\n # unfinished process is in this ready_queue\n lowerCamelCase__ 
:\t\t\t\tList[str] \t\t\t=\t\tqueue\n # current time\n lowerCamelCase__ :\t\t\t\tOptional[Any] \t\t\t=\t\tcurrent_time\n # finished process is in this sequence queue\n lowerCamelCase__ :\t\t\t\tdeque[Process] \t\t\t=\t\tdeque()\n def \t\t\t\t\tA_ (\t\t\t\tself :\t\t\t\tTuple ) -> list[str]:\n lowerCamelCase__ :\t\t\t\tUnion[str, Any] \t\t\t=\t\t[]\n for i in range(len(self.finish_queue ) ):\n sequence.append(self.finish_queue[i].process_name )\n return sequence\n def \t\t\t\t\tA_ (\t\t\t\tself :\t\t\t\tTuple ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tlist[Process] ) -> list[int]:\n lowerCamelCase__ :\t\t\t\tTuple \t\t\t=\t\t[]\n for i in range(len(UpperCAmelCase ) ):\n waiting_times.append(queue[i].waiting_time )\n return waiting_times\n def \t\t\t\t\tA_ (\t\t\t\tself :\t\t\t\tUnion[str, Any] ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tlist[Process] ) -> list[int]:\n lowerCamelCase__ :\t\t\t\tint \t\t\t=\t\t[]\n for i in range(len(UpperCAmelCase ) ):\n turnaround_times.append(queue[i].turnaround_time )\n return turnaround_times\n def \t\t\t\t\tA_ (\t\t\t\tself :\t\t\t\tOptional[int] ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tlist[Process] ) -> list[int]:\n lowerCamelCase__ :\t\t\t\tTuple \t\t\t=\t\t[]\n for i in range(len(UpperCAmelCase ) ):\n completion_times.append(queue[i].stop_time )\n return completion_times\n def \t\t\t\t\tA_ (\t\t\t\tself :\t\t\t\tstr ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tdeque[Process] ) -> list[int]:\n return [q.burst_time for q in queue]\n def \t\t\t\t\tA_ (\t\t\t\tself :\t\t\t\tint ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tProcess ) -> int:\n process.waiting_time += self.current_time - process.stop_time\n return process.waiting_time\n def \t\t\t\t\tA_ (\t\t\t\tself :\t\t\t\tOptional[int] ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tdeque[Process] ) -> deque[Process]:\n lowerCamelCase__ :\t\t\t\tdeque[Process] \t\t\t=\t\tdeque() # sequence deque of finished process\n while len(UpperCAmelCase ) != 0:\n lowerCamelCase__ :\t\t\t\tList[Any] \t\t\t=\t\tready_queue.popleft() # current process\n\n # if process's arrival time is later than current time, update current time\n if self.current_time < cp.arrival_time:\n self.current_time += cp.arrival_time\n\n # update waiting time of current process\n self.update_waiting_time(UpperCAmelCase )\n # update current time\n self.current_time += cp.burst_time\n # finish the process and set the process's burst-time 0\n lowerCamelCase__ :\t\t\t\tOptional[int] \t\t\t=\t\t0\n # set the process's turnaround time because it is finished\n lowerCamelCase__ :\t\t\t\tUnion[str, Any] \t\t\t=\t\tself.current_time - cp.arrival_time\n # set the completion time\n lowerCamelCase__ :\t\t\t\tAny \t\t\t=\t\tself.current_time\n # add the process to queue that has finished queue\n finished.append(UpperCAmelCase )\n\n self.finish_queue.extend(UpperCAmelCase ) # add finished process to finish queue\n # FCFS will finish all remaining processes\n return finished\n def \t\t\t\t\tA_ (\t\t\t\tself :\t\t\t\tstr ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tdeque[Process] ,\t\t\t\t\tUpperCAmelCase :\t\t\t\tint ) -> tuple[deque[Process], deque[Process]]:\n lowerCamelCase__ :\t\t\t\tdeque[Process] \t\t\t=\t\tdeque() # sequence deque of terminated process\n # just for 1 cycle and unfinished processes will go back to queue\n for _ in range(len(UpperCAmelCase ) ):\n lowerCamelCase__ :\t\t\t\tDict \t\t\t=\t\tready_queue.popleft() # current process\n\n # if process's arrival time is later than current time, update current time\n if self.current_time < cp.arrival_time:\n self.current_time += cp.arrival_time\n\n # update 
waiting time of unfinished processes\n self.update_waiting_time(UpperCAmelCase )\n # if the burst time of process is bigger than time-slice\n if cp.burst_time > time_slice:\n # use CPU for only time-slice\n self.current_time += time_slice\n # update remaining burst time\n cp.burst_time -= time_slice\n # update end point time\n lowerCamelCase__ :\t\t\t\tList[str] \t\t\t=\t\tself.current_time\n # locate the process behind the queue because it is not finished\n ready_queue.append(UpperCAmelCase )\n else:\n # use CPU for remaining burst time\n self.current_time += cp.burst_time\n # set burst time 0 because the process is finished\n lowerCamelCase__ :\t\t\t\tAny \t\t\t=\t\t0\n # set the finish time\n lowerCamelCase__ :\t\t\t\tint \t\t\t=\t\tself.current_time\n # update the process' turnaround time because it is finished\n lowerCamelCase__ :\t\t\t\tDict \t\t\t=\t\tself.current_time - cp.arrival_time\n # add the process to queue that has finished queue\n finished.append(UpperCAmelCase )\n\n self.finish_queue.extend(UpperCAmelCase ) # add finished process to finish queue\n # return finished processes queue and remaining processes queue\n return finished, ready_queue\n\n\n\n\n\n\n\n def \t\t\t\t\tA_ (\t\t\t\tself :\t\t\t\tDict ) -> deque[Process]:\n\n # all queues except last one have round_robin algorithm\n for i in range(self.number_of_queues - 1 ):\n lowerCamelCase__\t\t\t,\t\t\t\t\t\t\tlowerCamelCase__ :\t\t\t\tAny \t\t\t=\t\tself.round_robin(\n self.ready_queue ,\t\t\t\t\tself.time_slices[i] )\n # the last queue has first_come_first_served algorithm\n self.first_come_first_served(self.ready_queue )\n\n return self.finish_queue\n\n\nif __name__ == \"__main__\":\n import doctest\n\n _UpperCAmelCase :\tList[str] = Process(\"\"\"P1\"\"\", 0, 53)\n _UpperCAmelCase :\tUnion[str, Any] = Process(\"\"\"P2\"\"\", 0, 17)\n _UpperCAmelCase :\tint = Process(\"\"\"P3\"\"\", 0, 68)\n _UpperCAmelCase :\tstr = Process(\"\"\"P4\"\"\", 0, 24)\n _UpperCAmelCase :\tOptional[int] = 3\n _UpperCAmelCase :\tOptional[Any] = [17, 25]\n _UpperCAmelCase :\tOptional[int] = deque([Pa, Pa, Pa, Pa])\n\n if len(time_slices) != number_of_queues - 1:\n raise SystemExit(0)\n\n doctest.testmod(extraglobs={\"\"\"queue\"\"\": deque([Pa, Pa, Pa, Pa])})\n\n _UpperCAmelCase :\tTuple = Process(\"\"\"P1\"\"\", 0, 53)\n _UpperCAmelCase :\tAny = Process(\"\"\"P2\"\"\", 0, 17)\n _UpperCAmelCase :\tAny = Process(\"\"\"P3\"\"\", 0, 68)\n _UpperCAmelCase :\tList[Any] = Process(\"\"\"P4\"\"\", 0, 24)\n _UpperCAmelCase :\tList[str] = 3\n _UpperCAmelCase :\tOptional[int] = [17, 25]\n _UpperCAmelCase :\tOptional[int] = deque([Pa, Pa, Pa, Pa])\n _UpperCAmelCase :\tUnion[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)\n _UpperCAmelCase :\tDict = mlfq.multi_level_feedback_queue()\n\n # print total waiting times of processes(P1, P2, P3, P4)\n print(\n F\"\"\"waiting time:\\\n \\t\\t\\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}\"\"\"\n )\n # print completion times of processes(P1, P2, P3, P4)\n print(\n F\"\"\"completion time:\\\n \\t\\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}\"\"\"\n )\n # print total turnaround times of processes(P1, P2, P3, P4)\n print(\n F\"\"\"turnaround time:\\\n \\t\\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}\"\"\"\n )\n # print sequence of finished processes\n print(\n F\"\"\"sequence of finished processes:\\\n {mlfq.calculate_sequence_of_finish_queue()}\"\"\"\n 
)\n\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":45,"string":"45"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":898,"cells":{"code":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\ndef lowercase_ ( _snake_case\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\tif a < 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"\"\"Input value must be a positive integer\"\"\"\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\telif isinstance(_snake_case ,_snake_case\t\t\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\traise TypeError(\"\"\"Input value must be a 'int' type\"\"\"\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\treturn bin(_snake_case\t\t\t\t\t\t).count(\"\"\"1\"\"\"\t\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t\timport doctest\r\n\r\n\t\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":25,"string":"25"},"style_context":{"kind":"string","value":"\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport warnings\r\n\r\nfrom ...processing_utils import ProcessorMixin\r\nfrom ...tokenization_utils_base import BatchEncoding\r\n\r\nclass \t\tUpperCamelCase\t\t\t( lowercase\t\t\t\t\t\t):\r\n\t\t\t\tUpperCAmelCase\t\t\t\t: Optional[Any] =\t\t[\"\"\"image_processor\"\"\", \"\"\"tokenizer\"\"\"]\r\n\t\t\t\tUpperCAmelCase\t\t\t\t: Dict =\t\t\"\"\"CLIPImageProcessor\"\"\"\r\n\t\t\t\tUpperCAmelCase\t\t\t\t: Dict =\t\t(\"\"\"XLMRobertaTokenizer\"\"\", \"\"\"XLMRobertaTokenizerFast\"\"\")\r\n\r\n\r\n\r\n\t\t\t\tdef __init__(self\t\t\t\t: Union[str, Any]\t, _A\t\t\t\t: Dict=None\t, _A\t\t\t\t: Tuple=None\t, **_A\t\t\t\t: Optional[int]) ->\tOptional[int]:\r\n\t\t\t\t\t__snake_case\t\t\t:\t\t\t\t\t\t\tstr \t\t= None\r\n\t\t\t\t\tif \"feature_extractor\" in kwargs:\r\n\t\t\t\t\t\twarnings.warn(\r\n\t\t\t\t\t\t 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'\r\n\t\t\t\t\t\t ' instead.'\t, _A\t, )\r\n\t\t\t\t\t\t__snake_case\t\t\t:\t\t\t\t\t\t\tList[Any] \t\t= kwargs.pop('feature_extractor')\r\n\r\n\t\t\t\t\t__snake_case\t\t\t:\t\t\t\t\t\t\tint \t\t= image_processor if image_processor is not None else feature_extractor\r\n\t\t\t\t\tif image_processor is None:\r\n\t\t\t\t\t\traise ValueError('You need to specify an `image_processor`.')\r\n\t\t\t\t\tif tokenizer is None:\r\n\t\t\t\t\t\traise ValueError('You need to specify a `tokenizer`.')\r\n\r\n\t\t\t\t\tsuper().__init__(_A\t, _A)\r\n\r\n\r\n\r\n\t\t\t\tdef __call__(self\t\t\t\t: Dict\t, _A\t\t\t\t: Tuple=None\t, _A\t\t\t\t: Optional[int]=None\t, _A\t\t\t\t: Tuple=None\t, **_A\t\t\t\t: Any) ->\tint:\r\n\r\n\t\t\t\t\tif text is None and images is None:\r\n\t\t\t\t\t\traise ValueError('You have to specify either text or images. 
Both cannot be none.')\r\n\r\n\t\t\t\t\tif text is not None:\r\n\t\t\t\t\t\t__snake_case\t\t\t:\t\t\t\t\t\t\tList[str] \t\t= self.tokenizer(_A\t, return_tensors=_A\t, **_A)\r\n\r\n\t\t\t\t\tif images is not None:\r\n\t\t\t\t\t\t__snake_case\t\t\t:\t\t\t\t\t\t\tAny \t\t= self.image_processor(_A\t, return_tensors=_A\t, **_A)\r\n\r\n\t\t\t\t\tif text is not None and images is not None:\r\n\t\t\t\t\t\t__snake_case\t\t\t:\t\t\t\t\t\t\tAny \t\t= image_features.pixel_values\r\n\t\t\t\t\t\treturn encoding\r\n\t\t\t\t\telif text is not None:\r\n\t\t\t\t\t\treturn encoding\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\treturn BatchEncoding(data=dict(**_A)\t, tensor_type=_A)\r\n\r\n\r\n\r\n\t\t\t\tdef \t\t\t\t\t_lowercase (self\t\t\t\t: List[Any]\t, *_A\t\t\t\t: Dict\t, **_A\t\t\t\t: int) ->\tint:\r\n\t\t\t\t\treturn self.tokenizer.batch_decode(*_A\t, **_A)\r\n\r\n\r\n\r\n\t\t\t\tdef \t\t\t\t\t_lowercase (self\t\t\t\t: List[Any]\t, *_A\t\t\t\t: Union[str, Any]\t, **_A\t\t\t\t: Tuple) ->\tOptional[Any]:\r\n\t\t\t\t\treturn self.tokenizer.decode(*_A\t, **_A)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t@property\r\n\t\t\t\tdef \t\t\t\t\t_lowercase (self\t\t\t\t: Union[str, Any]) ->\tList[Any]:\r\n\t\t\t\t\t__snake_case\t\t\t:\t\t\t\t\t\t\tDict \t\t= self.tokenizer.model_input_names\r\n\t\t\t\t\t__snake_case\t\t\t:\t\t\t\t\t\t\tint \t\t= self.image_processor.model_input_names\r\n\t\t\t\t\treturn list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":172,"string":"172"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":899,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom fractions import Fraction\r\ndef \t\t\t\t\tlowerCamelCase (\t\t\t\t\t\ta_\t\t\t,\t\t\t\t\t\ta_ ) -> bool:\r\n\t\t\t\t\t\t\treturn (\r\n\t\t\t\t\t\t\t num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den\r\n\t\t\t\t\t\t\t)\r\ndef \t\t\t\t\tlowerCamelCase (\t\t\t\t\t\ta_ ) -> list[str]:\r\n\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t\t\t\t=\t\t\t\t\t[]\r\n\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t\t\t\t=\t\t\t\t\t11\r\n\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t\t\t\t=\t\t\t\t\tint('1' + '0' * digit_len )\r\n\t\t\t\t\t\t\tfor num in range(a_\t\t\t,\t\t\t\t\t\ta_ ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\twhile den <= 99:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif (num != den) and (num % 10 == den // 10) and (den % 10 != 0):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif is_digit_cancelling(a_\t\t\t,\t\t\t\t\t\ta_ ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsolutions.append(F'''{num}/{den}''' )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tden += 1\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tnum += 1\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t\t\t\t=\t\t\t\t\t10\r\n\t\t\t\t\t\t\treturn solutions\r\ndef \t\t\t\t\tlowerCamelCase (\t\t\t\t\t\ta_ = 2 ) -> int:\r\n\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t\t\t\t=\t\t\t\t\t1.0\r\n\t\t\t\t\t\t\tfor fraction in fraction_list(a_ ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_ \t\t\t\t\t\t=\t\t\t\t\tFraction(a_ )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tresult *= frac.denominator / frac.numerator\r\n\t\t\t\t\t\t\treturn int(a_ )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t\t\tprint(solution())\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":352,"string":"352"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport warnings\r\n\r\nfrom ...utils import logging\r\nfrom 
.image_processing_poolformer import PoolFormerImageProcessor\r\n\r\n\r\nlowerCamelCase_\t\t\t\t\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n\r\n\r\n\r\nclass \t\t\t\t\t\t\ta_\t\t\t(\t\t\t\t\t\ta_\t\t):\r\n\r\n\r\n\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\tdef __init__(\t\tself , *lowercase_ , **lowercase_ ) -> None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\twarnings.warn(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ' Please use PoolFormerImageProcessor instead.' , lowercase_ , )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsuper().__init__(*lowercase_ , **lowercase_ )\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":14,"string":"14"},"label":{"kind":"number","value":0,"string":"0"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":8,"numItemsPerPage":100,"numTotalItems":153992,"offset":800,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NTg0MzYzMSwic3ViIjoiL2RhdGFzZXRzL2luZmluaXR5b2ZzcGFjZS9weXRob25fY29kZXN0eWxlcy1taXhlZDEtNTAwIiwiZXhwIjoxNzU1ODQ3MjMxLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0._AzlN5-_bcOvg5DpyAa-u8KV8werRpCsCwnFi6YdzwWS7qT4oqHeVx6z1lt97wz8XbejXf08wvpW2UUcVto-DA","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">

code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __lowercase = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ['''ViTFeatureExtractor'''] __lowercase = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
43
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowercase = logging.get_logger(__name__) __lowercase = {'''tokenizer_file''': '''tokenizer.json'''} __lowercase = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : int = VOCAB_FILES_NAMES a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP a__ : List[str] = ["""input_ids""", """attention_mask"""] a__ : int = None def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]: super().__init__( __lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , ) __UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space: __UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type''')) __UpperCamelCase :str = add_prefix_space __UpperCamelCase :List[str] = pre_tok_class(**__lowercase) __UpperCamelCase :Tuple = add_prefix_space def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding: __UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" ''' pretokenized inputs.''') return super()._batch_encode_plus(*__lowercase , **__lowercase) def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding: __UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with""" ''' pretokenized inputs.''') return super()._encode_plus(*__lowercase , **__lowercase) def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]: __UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase) return tuple(__lowercase) def UpperCamelCase__ ( self , __lowercase) -> List[int]: __UpperCamelCase :str = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + 
[self.eos_token_id]) if len(__lowercase) > self.model_max_length: __UpperCamelCase :Any = input_ids[-self.model_max_length :] return input_ids
43
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : int = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ): '''simple docstring''' lowerCamelCase_ :List[Any] = '''gpt_bigcode''' lowerCamelCase_ :Any = ['''past_key_values'''] lowerCamelCase_ :Optional[Any] = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , snake_case_=5_0_2_5_7 , snake_case_=1_0_2_4 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=None , snake_case_="gelu_pytorch_tanh" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_=True , snake_case_=True , snake_case_=5_0_2_5_6 , snake_case_=5_0_2_5_6 , snake_case_=True , snake_case_=True , snake_case_=True , **snake_case_ , ): '''simple docstring''' UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Tuple = n_positions UpperCAmelCase_ : Any = n_embd UpperCAmelCase_ : int = n_layer UpperCAmelCase_ : Optional[int] = n_head UpperCAmelCase_ : str = n_inner UpperCAmelCase_ : str = activation_function UpperCAmelCase_ : Union[str, Any] = resid_pdrop UpperCAmelCase_ : Union[str, Any] = embd_pdrop UpperCAmelCase_ : List[Any] = attn_pdrop UpperCAmelCase_ : Optional[Any] = layer_norm_epsilon UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Any = scale_attn_weights UpperCAmelCase_ : List[str] = use_cache UpperCAmelCase_ : Optional[Any] = attention_softmax_in_fpaa UpperCAmelCase_ : List[str] = scale_attention_softmax_in_fpaa UpperCAmelCase_ : Any = multi_query UpperCAmelCase_ : str = bos_token_id UpperCAmelCase_ : List[Any] = eos_token_id super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
274
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig snake_case__ : Dict = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ): '''simple docstring''' lowerCamelCase_ :str = '''albert''' def __init__( self , snake_case_=3_0_0_0_0 , snake_case_=1_2_8 , snake_case_=4_0_9_6 , snake_case_=1_2 , snake_case_=1 , snake_case_=6_4 , snake_case_=1_6_3_8_4 , snake_case_=1 , snake_case_="gelu_new" , snake_case_=0 , snake_case_=0 , snake_case_=5_1_2 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.1 , snake_case_="absolute" , snake_case_=0 , snake_case_=2 , snake_case_=3 , **snake_case_ , ): '''simple docstring''' super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ ) UpperCAmelCase_ : List[Any] = vocab_size UpperCAmelCase_ : Dict = embedding_size UpperCAmelCase_ : str = hidden_size UpperCAmelCase_ : Any = num_hidden_layers UpperCAmelCase_ : Union[str, Any] = num_hidden_groups UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : Any = inner_group_num UpperCAmelCase_ : Optional[int] = hidden_act UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : List[str] = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : Dict = type_vocab_size UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Optional[Any] = layer_norm_eps UpperCAmelCase_ : Dict = classifier_dropout_prob UpperCAmelCase_ : Tuple = position_embedding_type class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ): '''simple docstring''' @property def _UpperCamelCase ( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase_ : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: UpperCAmelCase_ : List[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
274
1
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation.

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded)  # {texts[tgt_lang]}
```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)

### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```

## TODO

- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
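# Illustrative one-off call to the generator above (the original script already loops
# over all four pairs at import time); the output directory below is an example path.
write_model_card("model_cards/facebook/wmt19-en-de", src_lang="en", tgt_lang="de")
# -> prints "Generating model_cards/facebook/wmt19-en-de/README.md" and writes the card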
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark dataset builder."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df, partition_order):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")

        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
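# Hypothetical usage sketch for the builder above. `Dataset.from_spark` is the public
# entry point in recent `datasets` releases; the toy DataFrame is an example.
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
ds = Dataset.from_spark(df)  # materializes the DataFrame into an Arrow-backed Dataset
print(ds[0])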
"""Processor class for InstructBLIP. Largely a copy of the BLIP-2 processor, with an extra tokenizer for the Q-Former."""

import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
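# Hypothetical usage sketch for the processor above; "Salesforce/instructblip-vicuna-7b"
# is an example checkpoint name, and the random array stands in for a real image.
import numpy as np
from PIL import Image

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.fromarray(np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8))
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
print(sorted(inputs.keys()))  # pixel_values, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask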
"""
Project Euler Problem 75: https://projecteuler.net/problem=75

Count the perimeters L <= limit for which exactly one integer-sided right
triangle has perimeter L. Candidate triples are generated with Euclid's
formula from coprime (m, n) of opposite parity.
"""
from collections import defaultdict
from math import gcd


def solution(limit: int = 1500000) -> int:
    # frequencies[perimeter] counts how many distinct triangles have that perimeter
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # count the primitive triple and all its multiples up to the limit
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
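# Quick sanity sketch for Euclid's formula used above (illustrative, not in the original):
# for coprime m > n of opposite parity, a = m^2 - n^2, b = 2mn, c = m^2 + n^2,
# and the perimeter is a + b + c = 2m(m + n).
m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert (a, b, c) == (3, 4, 5) and a + b + c == 2 * m * (m + n) == 12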
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length drawn from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full booth with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    # Passwords should contain UPPERCASE, lowercase, numbers, and special characters
    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you better save it.]")


if __name__ == "__main__":
    main()
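# Illustrative check of the helpers above (not part of the original script):
pw = password_generator(12)
assert len(pw) == 12
print(pw, "->", "strong" if is_strong_password(pw) else "weak")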
"""Image processor for MobileNetV2-style models (the class name was lost in the source; the defaults match
transformers' MobileNetV2 image processor, including semantic-segmentation post-processing)."""

from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so that the shortest edge matches `size["shortest_edge"]`, keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into per-pixel semantic segmentation maps, optionally resized to `target_sizes`."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
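# Hypothetical round trip through the processor above; the class name follows the
# reconstruction and the random array stands in for a real PIL image.
import numpy as np

processor = MobileNetV2ImageProcessor()
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after shortest-edge resize + center crop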
"""Pancake sort: repeatedly flip the largest unsorted element to the front, then into its final place."""


def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
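# Quick check of pancake_sort (illustrative):
assert pancake_sort([3, 1, 5, 2, 4]) == [1, 2, 3, 4, 5]
assert pancake_sort([]) == []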
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of CLIP image embeddings, used to normalize image embeddings before noising
    and to un-normalize them afterwards.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
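# Quick sanity sketch for the normalizer above (illustrative): scale followed by
# unscale should be the identity for any embedding batch.
import torch

normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(2, 768)
assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)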
"""PyTorch MobileNetV1 model."""

from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)


class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # pointwise convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
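# Hypothetical inference sketch for the model above; it assumes the published
# `transformers` package and the `google/mobilenet_v1_1.0_224` checkpoint, and the
# random tensor stands in for a processor-prepared image.
import torch
from transformers import MobileNetV1ForImageClassification

model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
model.eval()

pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for preprocessed pixel values
with torch.no_grad():
    logits = model(pixel_values).logits
print(model.config.id2label[logits.argmax(-1).item()])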
"""Quickselect: find the k-th smallest element of a list in expected linear time."""
from __future__ import annotations

from random import choice


def random_pivot(lst):
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of lst (1-indexed, distinct elements assumed)."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
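# Quick check of kth_number (illustrative); k is 1-indexed and elements are distinct:
assert kth_number([2, 1, 3, 4, 5], 3) == 3
assert kth_number([10, 2, 1, 20, 4, 3], 1) == 1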
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : int) -> Any: '''simple docstring''' __UpperCamelCase : Tuple = XCLIPTextConfig() # derive patch size from model name __UpperCamelCase : Any = model_name.find("patch") __UpperCamelCase : Dict = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2]) __UpperCamelCase : List[str] = XCLIPVisionConfig(patch_size=_lowerCamelCase , num_frames=_lowerCamelCase) if "large" in model_name: __UpperCamelCase : Any = 768 __UpperCamelCase : List[str] = 3_072 __UpperCamelCase : Dict = 12 __UpperCamelCase : List[Any] = 1_024 __UpperCamelCase : int = 4_096 __UpperCamelCase : List[Any] = 16 __UpperCamelCase : List[Any] = 24 __UpperCamelCase : Tuple = 768 __UpperCamelCase : Union[str, Any] = 3_072 if model_name == "xclip-large-patch14-16-frames": __UpperCamelCase : List[Any] = 336 __UpperCamelCase : List[Any] = XCLIPConfig.from_text_vision_configs(_lowerCamelCase , _lowerCamelCase) if "large" in model_name: __UpperCamelCase : str = 768 return config def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict) -> Tuple: '''simple docstring''' if name == "token_embedding.weight": __UpperCamelCase : Tuple = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight") if name == "positional_embedding": __UpperCamelCase : Dict = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight") if "ln_1" in name: __UpperCamelCase : Union[str, Any] = name.replace("ln_1" , "layer_norm1") if "ln_2" in name: __UpperCamelCase : Tuple = name.replace("ln_2" , "layer_norm2") if "c_fc" in name: __UpperCamelCase : Dict = name.replace("c_fc" , "fc1") if "c_proj" in name: __UpperCamelCase : str = name.replace("c_proj" , "fc2") if name.startswith("transformer.resblocks"): __UpperCamelCase : str = name.replace("transformer.resblocks" , "text_model.encoder.layers") if "attn.out_proj" in name and "message" not in name: __UpperCamelCase : int = name.replace("attn.out_proj" , "self_attn.out_proj") if "ln_final" in name: __UpperCamelCase : Union[str, Any] = name.replace("ln_final" , "text_model.final_layer_norm") # visual encoder if name == "visual.class_embedding": __UpperCamelCase : List[Any] = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding") if name == "visual.positional_embedding": __UpperCamelCase : List[str] = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight") if name.startswith("visual.transformer.resblocks"): __UpperCamelCase : int = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers") if "visual.conv1" in name: __UpperCamelCase : List[Any] = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding") if "visual.ln_pre" in name: __UpperCamelCase : Union[str, Any] = name.replace("visual.ln_pre" , "vision_model.pre_layernorm") if "visual.ln_post" in name: __UpperCamelCase : Tuple = name.replace("visual.ln_post" , "vision_model.post_layernorm") if "visual.proj" in name: __UpperCamelCase : Optional[Any] = name.replace("visual.proj" , "visual_projection.weight") if "text_projection" in name: __UpperCamelCase : List[Any] = name.replace("text_projection" , "text_projection.weight") # things on top if "prompts_visual_proj" in 
name: __UpperCamelCase : int = name.replace("prompts_visual_proj" , "prompts_visual_projection") if "prompts_visual_ln" in name: __UpperCamelCase : str = name.replace("prompts_visual_ln" , "prompts_visual_layernorm") # mit if name == "mit.positional_embedding": __UpperCamelCase : Dict = name.replace("positional" , "position") if name.startswith("mit.resblocks"): __UpperCamelCase : str = name.replace("mit.resblocks" , "mit.encoder.layers") # prompts generator if name.startswith("prompts_generator.norm"): __UpperCamelCase : Any = name.replace("prompts_generator.norm" , "prompts_generator.layernorm") return name def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]) -> str: '''simple docstring''' for key in orig_state_dict.copy().keys(): __UpperCamelCase : Union[str, Any] = orig_state_dict.pop(_lowerCamelCase) if "attn.in_proj" in key: __UpperCamelCase : Optional[int] = key.split(".") if key.startswith("visual"): __UpperCamelCase : Optional[Any] = key_split[3] __UpperCamelCase : List[str] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __UpperCamelCase : str = val[ :dim, : ] __UpperCamelCase : Tuple = val[ dim : dim * 2, : ] __UpperCamelCase : str = val[ -dim:, : ] else: __UpperCamelCase : str = val[ :dim ] __UpperCamelCase : Optional[Any] = val[ dim : dim * 2 ] __UpperCamelCase : List[str] = val[ -dim: ] else: if "weight" in key: __UpperCamelCase : Optional[Any] = val[ :dim, : ] __UpperCamelCase : Optional[int] = val[ dim : dim * 2, : ] __UpperCamelCase : Union[str, Any] = val[ -dim:, : ] else: __UpperCamelCase : Optional[Any] = val[:dim] __UpperCamelCase : Optional[int] = val[ dim : dim * 2 ] __UpperCamelCase : List[str] = val[-dim:] elif key.startswith("mit"): __UpperCamelCase : List[Any] = key_split[2] __UpperCamelCase : Optional[int] = config.vision_config.mit_hidden_size if "weight" in key: __UpperCamelCase : Any = val[:dim, :] __UpperCamelCase : Optional[Any] = val[dim : dim * 2, :] __UpperCamelCase : Dict = val[-dim:, :] else: __UpperCamelCase : Optional[int] = val[:dim] __UpperCamelCase : Optional[int] = val[dim : dim * 2] __UpperCamelCase : Dict = val[-dim:] else: __UpperCamelCase : List[str] = key_split[2] __UpperCamelCase : Tuple = config.text_config.hidden_size if "weight" in key: __UpperCamelCase : Optional[Any] = val[:dim, :] __UpperCamelCase : Union[str, Any] = val[ dim : dim * 2, : ] __UpperCamelCase : Optional[Any] = val[-dim:, :] else: __UpperCamelCase : Any = val[:dim] __UpperCamelCase : Union[str, Any] = val[ dim : dim * 2 ] __UpperCamelCase : Optional[int] = val[-dim:] else: __UpperCamelCase : Dict = rename_key(_lowerCamelCase) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __UpperCamelCase : List[Any] = val.T __UpperCamelCase : Any = val return orig_state_dict def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any]) -> Tuple: '''simple docstring''' if num_frames == 8: __UpperCamelCase : Any = "eating_spaghetti_8_frames.npy" elif num_frames == 16: __UpperCamelCase : Any = "eating_spaghetti.npy" elif num_frames == 32: __UpperCamelCase : Tuple = "eating_spaghetti_32_frames.npy" __UpperCamelCase : Optional[int] = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename=_lowerCamelCase , repo_type="dataset" , ) __UpperCamelCase : Optional[Any] = np.load(_lowerCamelCase) return list(_lowerCamelCase) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[Any]=False) -> Optional[int]: '''simple 
docstring'''
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
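# Usage sketch (illustrative; assumes the usual transformers conversion-script layout and that
# `gdown` is installed for the Google Drive checkpoints):
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch16 \
#       --pytorch_dump_folder_path ./xclip-base-patch16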
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
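# Usage sketch (illustrative): the point of the `_import_structure` table above is lazy loading.
# Importing the config stays cheap; torch-backed classes are only resolved on first access.
#
#   from transformers import Pix2StructConfig                     # no torch import yet
#   from transformers import Pix2StructForConditionalGeneration   # triggers the torch-backed module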
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name __lowercase = """ Examples: ```py >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\") >>> pipe_prior.to(\"cuda\") >>> prompt = \"red cat, 4k photo\" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> zero_image_emb = out.negative_image_embeds >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\") >>> pipe.to(\"cuda\") >>> image = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ).images >>> image[0].save(\"cat.png\") ``` """ def lowercase ( A_ , A_ , A_=8 )-> List[Any]: '''simple docstring''' a : int = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a : List[str] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class _A ( _a ): """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : DDPMScheduler , __UpperCAmelCase : VQModel , ): super().__init__() self.register_modules( unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , movq=__UpperCAmelCase , ) a : int = 2 ** (len(self.movq.config.block_out_channels) - 1) def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str]): if latents is None: a : Any = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''') a : Dict = latents.to(__UpperCAmelCase) a : Dict = latents * scheduler.init_noise_sigma return latents def __snake_case ( self : Any , __UpperCAmelCase : List[Any]=0): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`") a : Dict = torch.device(f'''cuda:{gpu_id}''') a : Any = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[Any] , __UpperCAmelCase : Union[str, Any]=0): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") a : List[Any] = torch.device(f'''cuda:{gpu_id}''') if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__UpperCAmelCase) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a : Any = None for cpu_offloaded_model in [self.unet, self.movq]: a , a : Tuple = cpu_offload_with_hook(__UpperCAmelCase , __UpperCAmelCase , 
prev_module_hook=__UpperCAmelCase) # We'll offload the last model manually. a : Dict = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __snake_case ( self : Optional[int]): if not hasattr(self.unet , "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCAmelCase , "_hf_hook") and hasattr(module._hf_hook , "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() @replace_example_docstring(__UpperCAmelCase) def __call__( self : Optional[Any] , __UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 4.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ): a : Optional[int] = self._execution_device a : int = guidance_scale > 1.0 if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Union[str, Any] = torch.cat(__UpperCAmelCase , dim=0) a : int = image_embeds.shape[0] * num_images_per_prompt if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : List[str] = torch.cat(__UpperCAmelCase , dim=0) if do_classifier_free_guidance: a : Union[str, Any] = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0) a : Tuple = negative_image_embeds.repeat_interleave(__UpperCAmelCase , dim=0) a : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=__UpperCAmelCase) self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase) a : Union[str, Any] = self.scheduler.timesteps a : Any = self.unet.config.in_channels a , a : str = downscale_height_and_width(__UpperCAmelCase , __UpperCAmelCase , self.movq_scale_factor) # create initial latent a : Any = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.scheduler , ) for i, t in enumerate(self.progress_bar(__UpperCAmelCase)): # expand the latents if we are doing classifier free guidance a : str = torch.cat([latents] * 2) if do_classifier_free_guidance else latents a : Dict = {"image_embeds": image_embeds} a : Tuple = self.unet( sample=__UpperCAmelCase , timestep=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , added_cond_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0] if do_classifier_free_guidance: a , a : int = noise_pred.split(latents.shape[1] , dim=1) a , a : Any = noise_pred.chunk(2) a , a : int = variance_pred.chunk(2) a : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a : str = torch.cat([noise_pred, variance_pred_text] , dim=1) if not ( hasattr(self.scheduler.config , "variance_type") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a , a : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1) # compute the previous noisy sample x_t -> x_t-1 a : Any = self.scheduler.step( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase , )[0] # post-processing a : Tuple = self.movq.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase)["sample"] if 
output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''') if output_type in ["np", "pil"]: a : Optional[Any] = image * 0.5 + 0.5 a : Union[str, Any] = image.clamp(0 , 1) a : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": a : Optional[int] = self.numpy_to_pil(__UpperCAmelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase)
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error 
{type(__UpperCAmelCase)}: {e}''') raise
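# Usage sketch (illustrative): this builder is what `load_dataset("parquet", ...)` dispatches to;
# config kwargs such as `columns` are forwarded into ParquetConfig. Assumes a local train.parquet.
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "train.parquet"})
#   print(ds["train"].features)  # inferred from the parquet schema when `features` is not set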
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets _lowercase = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ _lowercase = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ _lowercase = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def A (__lowerCamelCase :Dict ): def remove_articles(__lowerCamelCase :Optional[Any] ): _lowerCAmelCase = re.compile(r"""\b(a|an|the)\b""" , re.UNICODE ) return re.sub(UpperCamelCase__ , """ """ , UpperCamelCase__ ) def white_space_fix(__lowerCamelCase :Any ): return " ".join(text.split() ) def remove_punc(__lowerCamelCase :int ): _lowerCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__lowerCamelCase :Tuple ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase__ ) ) ) ) def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Dict ): return int(normalize_answer(UpperCamelCase__ ) == normalize_answer(UpperCamelCase__ ) ) def A (__lowerCamelCase :Any , __lowerCamelCase :Union[str, Any] ): _lowerCAmelCase = [any(compute_exact(UpperCamelCase__ , UpperCamelCase__ ) for ref in refs ) for pred, refs in zip(UpperCamelCase__ , UpperCamelCase__ )] return (sum(UpperCamelCase__ ) / len(UpperCamelCase__ )) * 100 def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Any , __lowerCamelCase :str , __lowerCamelCase :str ): _lowerCAmelCase = [rgram for rgrams in rgramslist for rgram in rgrams] _lowerCAmelCase = Counter(UpperCamelCase__ ) _lowerCAmelCase = Counter(UpperCamelCase__ ) _lowerCAmelCase = Counter() for sgram, scount in sgramcounter.items(): _lowerCAmelCase = scount * numref _lowerCAmelCase = Counter(UpperCamelCase__ ) 
_lowerCAmelCase = Counter() for cgram, ccount in cgramcounter.items(): _lowerCAmelCase = ccount * numref # KEEP _lowerCAmelCase = sgramcounter_rep & cgramcounter_rep _lowerCAmelCase = keepgramcounter_rep & rgramcounter _lowerCAmelCase = sgramcounter_rep & rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(UpperCamelCase__ ) > 0: _lowerCAmelCase = keeptmpscorea / len(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _lowerCAmelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _lowerCAmelCase = 0 if keepscore_precision > 0 or keepscore_recall > 0: _lowerCAmelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _lowerCAmelCase = sgramcounter_rep - cgramcounter_rep _lowerCAmelCase = delgramcounter_rep - rgramcounter _lowerCAmelCase = sgramcounter_rep - rgramcounter _lowerCAmelCase = 0 _lowerCAmelCase = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _lowerCAmelCase = 1 if len(UpperCamelCase__ ) > 0: _lowerCAmelCase = deltmpscorea / len(UpperCamelCase__ ) # ADDITION _lowerCAmelCase = set(UpperCamelCase__ ) - set(UpperCamelCase__ ) _lowerCAmelCase = set(UpperCamelCase__ ) & set(UpperCamelCase__ ) _lowerCAmelCase = set(UpperCamelCase__ ) - set(UpperCamelCase__ ) _lowerCAmelCase = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_lowerCAmelCase = 1 _lowerCAmelCase = 1 if len(UpperCamelCase__ ) > 0: _lowerCAmelCase = addtmpscore / len(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: _lowerCAmelCase = addtmpscore / len(UpperCamelCase__ ) _lowerCAmelCase = 0 if addscore_precision > 0 or addscore_recall > 0: _lowerCAmelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A (__lowerCamelCase :Any , __lowerCamelCase :List[Any] , __lowerCamelCase :Optional[int] ): _lowerCAmelCase = len(UpperCamelCase__ ) _lowerCAmelCase = ssent.split(""" """ ) _lowerCAmelCase = csent.split(""" """ ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] for rsent in rsents: _lowerCAmelCase = rsent.split(""" """ ) _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = [] ragramslist.append(UpperCamelCase__ ) for i in range(0 , len(UpperCamelCase__ ) - 1 ): if i < len(UpperCamelCase__ ) - 1: _lowerCAmelCase = ragrams[i] + """ """ + ragrams[i + 1] ragrams.append(UpperCamelCase__ ) if i < len(UpperCamelCase__ ) - 2: _lowerCAmelCase = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] ragrams.append(UpperCamelCase__ ) if i < len(UpperCamelCase__ ) - 3: _lowerCAmelCase = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3] ragrams.append(UpperCamelCase__ ) ragramslist.append(UpperCamelCase__ ) ragramslist.append(UpperCamelCase__ ) ragramslist.append(UpperCamelCase__ ) for i in range(0 , len(UpperCamelCase__ ) - 1 ): if i < len(UpperCamelCase__ ) - 1: _lowerCAmelCase = sagrams[i] + """ """ + sagrams[i + 1] sagrams.append(UpperCamelCase__ ) if i < len(UpperCamelCase__ ) - 2: _lowerCAmelCase = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] sagrams.append(UpperCamelCase__ ) if i < len(UpperCamelCase__ ) - 3: _lowerCAmelCase = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3] sagrams.append(UpperCamelCase__ ) for i in range(0 , len(UpperCamelCase__ ) - 1 ): if i < len(UpperCamelCase__ ) - 1: _lowerCAmelCase = cagrams[i] + """ """ + cagrams[i + 1] cagrams.append(UpperCamelCase__ ) if i < len(UpperCamelCase__ ) - 2: _lowerCAmelCase = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] cagrams.append(UpperCamelCase__ ) if i < len(UpperCamelCase__ ) - 3: _lowerCAmelCase = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3] cagrams.append(UpperCamelCase__ ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = SARIngram(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _lowerCAmelCase = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _lowerCAmelCase = sum([delascore, delascore, delascore, delascore] ) / 4 _lowerCAmelCase = sum([addascore, addascore, addascore, addascore] ) / 4 _lowerCAmelCase = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A 
(__lowerCamelCase :List[str] , __lowerCamelCase :bool = True , __lowerCamelCase :str = "13a" , __lowerCamelCase :bool = True ): if lowercase: _lowerCAmelCase = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _lowerCAmelCase = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase__ )()(UpperCamelCase__ ) else: _lowerCAmelCase = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase__ ) elif tokenizer == "moses": _lowerCAmelCase = sacremoses.MosesTokenizer().tokenize(UpperCamelCase__ , return_str=UpperCamelCase__ , escape=UpperCamelCase__ ) elif tokenizer == "penn": _lowerCAmelCase = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase__ , return_str=UpperCamelCase__ ) else: _lowerCAmelCase = sentence if not return_str: _lowerCAmelCase = normalized_sent.split() return normalized_sent def A (__lowerCamelCase :int , __lowerCamelCase :Tuple , __lowerCamelCase :Union[str, Any] ): if not (len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == len(UpperCamelCase__ )): raise ValueError("""Sources length must match predictions and references lengths.""" ) _lowerCAmelCase = 0 for src, pred, refs in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): sari_score += SARIsent(normalize(UpperCamelCase__ ) , normalize(UpperCamelCase__ ) , [normalize(UpperCamelCase__ ) for sent in refs] ) _lowerCAmelCase = sari_score / len(UpperCamelCase__ ) return 100 * sari_score def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :int , __lowerCamelCase :Union[str, Any]="exp" , __lowerCamelCase :List[str]=None , __lowerCamelCase :List[str]=False , __lowerCamelCase :Dict=False , __lowerCamelCase :str=False , ): _lowerCAmelCase = len(references[0] ) if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) _lowerCAmelCase = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )] _lowerCAmelCase = sacrebleu.corpus_bleu( UpperCamelCase__ , UpperCamelCase__ , smooth_method=UpperCamelCase__ , smooth_value=UpperCamelCase__ , force=UpperCamelCase__ , lowercase=UpperCamelCase__ , use_effective_order=UpperCamelCase__ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _lowercase ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=[ """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""", """https://github.com/cocoxu/simplification/blob/master/SARI.py""", """https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""", """https://github.com/mjpost/sacreBLEU""", ] , reference_urls=[ """https://www.aclweb.org/anthology/Q16-1029.pdf""", """https://github.com/mjpost/sacreBLEU""", """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowercase ( self , _lowercase , _lowercase , _lowercase ): """simple docstring""" _lowerCAmelCase = {} result.update({"""sari""": compute_sari(sources=_a , predictions=_a , 
references=_a )} ) result.update({"""sacrebleu""": compute_sacrebleu(predictions=_a , references=_a )} ) result.update({"""exact""": compute_em(predictions=_a , references=_a )} ) return result
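# Usage sketch (illustrative; mirrors the example in the metric's docstring above):
#
#   import datasets
#   wiki_split = datasets.load_metric("wiki_split")
#   results = wiki_split.compute(
#       sources=["About 95 species are currently accepted ."],
#       predictions=["About 95 you now get in ."],
#       references=[["About 95 species are currently known ."]],
#   )
#   # -> {'sari': 21.80..., 'sacrebleu': 14.53..., 'exact': 0.0}; SARI averages keep/delete/add
#   #    scores over 1- to 4-grams before combining them.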
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
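# Usage sketch (illustrative; requires a torch-backed transformers install):
#
#   from transformers import ConvBertConfig, ConvBertModel
#   config = ConvBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
#   model = ConvBertModel(config)  # randomly initialized; head_ratio=2 splits the heads between
#                                  # self-attention and span-based dynamic convolution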
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''], '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''BertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BertForMaskedLM''', '''BertForMultipleChoice''', '''BertForNextSentencePrediction''', '''BertForPreTraining''', '''BertForQuestionAnswering''', '''BertForSequenceClassification''', '''BertForTokenClassification''', '''BertLayer''', '''BertLMHeadModel''', '''BertModel''', '''BertPreTrainedModel''', '''load_tf_weights_in_bert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBertEmbeddings''', '''TFBertForMaskedLM''', '''TFBertForMultipleChoice''', '''TFBertForNextSentencePrediction''', '''TFBertForPreTraining''', '''TFBertForQuestionAnswering''', '''TFBertForSequenceClassification''', '''TFBertForTokenClassification''', '''TFBertLMHeadModel''', '''TFBertMainLayer''', '''TFBertModel''', '''TFBertPreTrainedModel''', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''TFBertTokenizer'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''FlaxBertForCausalLM''', '''FlaxBertForMaskedLM''', '''FlaxBertForMultipleChoice''', '''FlaxBertForNextSentencePrediction''', '''FlaxBertForPreTraining''', '''FlaxBertForQuestionAnswering''', '''FlaxBertForSequenceClassification''', '''FlaxBertForTokenClassification''', '''FlaxBertModel''', '''FlaxBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, 
TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class UpperCamelCase_ : def __init__( self : str ) -> Dict: UpperCAmelCase_ : List[Any] = "" UpperCAmelCase_ : int = "" UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : int = 0 UpperCAmelCase_ : List[Any] = 256 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : List[str] = 0 def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]: UpperCAmelCase_ : Dict = cva.imread(lowerCAmelCase_ , 0 ) UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.img ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) UpperCAmelCase_ : List[Any] = np.sum(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) ): UpperCAmelCase_ : List[Any] = x[i] / self.k self.sk += prk UpperCAmelCase_ : Optional[Any] = (self.L - 1) * self.sk if self.rem != 0: UpperCAmelCase_ : Any = int(last % last ) UpperCAmelCase_ : List[str] = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size ) UpperCAmelCase_ : Dict = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): UpperCAmelCase_ : Any = self.img[j][i] if num != self.last_list[num]: UpperCAmelCase_ : Tuple = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: plt.hist(self.img.ravel() , 256 , [0, 256] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCamelCase_ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') lowerCamelCase_ = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
"""Project Euler problem 2: sum the even-valued Fibonacci terms that do not exceed four million."""


def solution(n: int = 4_000_000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
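# Equivalent sketch (illustrative): even Fibonacci numbers are every third term and satisfy
# E(k) = 4 * E(k - 1) + E(k - 2), so the even terms can be generated directly.
def solution_even_recurrence(n: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8  # first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total


assert solution_even_recurrence(100) == 2 + 8 + 34  # the even terms up to 100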
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Apply the affine transform that maps the triangle pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list (pts1 is mapped onto three different target triangles)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts1, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts1, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
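# Usage sketch (illustrative): for a pure rotation about the image centre, OpenCV can build the
# affine matrix directly instead of picking three point pairs by hand.
import cv2
import numpy as np


def rotate(img: np.ndarray, angle_deg: float) -> np.ndarray:
    rows, cols = img.shape[:2]
    matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle_deg, 1.0)
    return cv2.warpAffine(img, matrix, (cols, rows))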
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = (DDPMScheduler,) def A_ ( self , **lowercase ): _lowerCamelCase : Dict = { 'num_train_timesteps': 1000, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**lowercase ) return config def A_ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=lowercase ) def A_ ( self ): for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=lowercase , beta_end=lowercase ) def A_ ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase ) def A_ ( self ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowercase ) def A_ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase ) def A_ ( self ): self.check_over_configs(thresholding=lowercase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , ) def A_ ( self ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowercase ) def A_ ( self ): for t in [0, 500, 999]: self.check_over_forward(time_step=lowercase ) def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def A_ ( self ): _lowerCamelCase : Optional[int] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : Optional[int] = scheduler_class(**lowercase ) _lowerCamelCase : Tuple = len(lowercase ) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : Tuple = self.dummy_sample_deter _lowerCamelCase : Tuple = torch.manual_seed(0 ) for t in reversed(range(lowercase ) ): # 1. predict noise residual _lowerCamelCase : Tuple = model(lowercase , lowercase ) # 2. predict previous mean of sample x_t-1 _lowerCamelCase : str = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCamelCase : List[str] = pred_prev_sample _lowerCamelCase : List[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : Dict = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2 assert abs(result_mean.item() - 0.33_72 ) < 1E-3 def A_ ( self ): _lowerCamelCase : str = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config(prediction_type='v_prediction' ) _lowerCamelCase : List[Any] = scheduler_class(**lowercase ) _lowerCamelCase : List[Any] = len(lowercase ) _lowerCamelCase : int = self.dummy_model() _lowerCamelCase : Any = self.dummy_sample_deter _lowerCamelCase : Any = torch.manual_seed(0 ) for t in reversed(range(lowercase ) ): # 1. 
predict noise residual _lowerCamelCase : Union[str, Any] = model(lowercase , lowercase ) # 2. predict previous mean of sample x_t-1 _lowerCamelCase : Any = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance _lowerCamelCase : Optional[int] = pred_prev_sample _lowerCamelCase : List[str] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2 assert abs(result_mean.item() - 0.26_31 ) < 1E-3 def A_ ( self ): _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : List[str] = self.get_scheduler_config() _lowerCamelCase : Any = scheduler_class(**lowercase ) _lowerCamelCase : Dict = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowercase ) _lowerCamelCase : Any = scheduler.timesteps for i, timestep in enumerate(lowercase ): if i == len(lowercase ) - 1: _lowerCamelCase : Optional[int] = -1 else: _lowerCamelCase : Optional[Any] = timesteps[i + 1] _lowerCamelCase : Optional[int] = scheduler.previous_timestep(lowercase ) _lowerCamelCase : Tuple = prev_t.item() self.assertEqual(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Tuple = scheduler_class(**lowercase ) _lowerCamelCase : List[Any] = [100, 87, 50, 51, 0] with self.assertRaises(lowercase , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=lowercase ) def A_ ( self ): _lowerCamelCase : str = self.scheduler_classes[0] _lowerCamelCase : List[Any] = self.get_scheduler_config() _lowerCamelCase : int = scheduler_class(**lowercase ) _lowerCamelCase : int = [100, 87, 50, 1, 0] _lowerCamelCase : Optional[int] = len(lowercase ) with self.assertRaises(lowercase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ): scheduler.set_timesteps(num_inference_steps=lowercase , timesteps=lowercase ) def A_ ( self ): _lowerCamelCase : Optional[int] = self.scheduler_classes[0] _lowerCamelCase : int = self.get_scheduler_config() _lowerCamelCase : List[Any] = scheduler_class(**lowercase ) _lowerCamelCase : int = [scheduler.config.num_train_timesteps] with self.assertRaises( lowercase , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=lowercase )
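# Usage sketch (illustrative; requires `diffusers` and `torch`): the denoising loop that the tests
# above exercise, with a zero tensor standing in for a trained UNet's noise prediction.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8)  # dummy "noisy image"
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])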
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' pass @nightly @require_onnxruntime @require_torch_gpu class A__ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self: str) -> int: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str: """simple docstring""" __lowerCAmelCase : int = ort.SessionOptions() __lowerCAmelCase : Tuple = False return options def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]: """simple docstring""" __lowerCAmelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png") __lowerCAmelCase : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png") __lowerCAmelCase : Any = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE) __lowerCAmelCase : Any = "A red cat sitting on a park bench" __lowerCAmelCase : Optional[int] = np.random.RandomState(0) __lowerCAmelCase : str = pipe( prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type="np" , ) __lowerCAmelCase : Optional[int] = output.images __lowerCAmelCase : List[Any] = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __lowerCAmelCase : Dict = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png") __lowerCAmelCase : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png") __lowerCAmelCase : List[Any] = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx") __lowerCAmelCase : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE) __lowerCAmelCase : Any = "A red cat sitting on a park bench" __lowerCAmelCase : Optional[int] = np.random.RandomState(0) __lowerCAmelCase : Optional[int] = pipe( 
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=20 , generator=_SCREAMING_SNAKE_CASE , output_type="np" , ) __lowerCAmelCase : List[str] = output.images __lowerCAmelCase : Union[str, Any] = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) __lowerCAmelCase : Tuple = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
"""simple docstring""" from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def _lowercase ( __snake_case ) -> Dict: return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] ,unknown_args[1::2] )} def _lowercase ( ) -> Union[str, Any]: __lowerCAmelCase : List[str] = ArgumentParser( "HuggingFace Datasets CLI tool" ,usage="datasets-cli <command> [<args>]" ,allow_abbrev=__snake_case ) __lowerCAmelCase : str = parser.add_subparsers(help="datasets-cli command helpers" ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(__snake_case ) EnvironmentCommand.register_subcommand(__snake_case ) TestCommand.register_subcommand(__snake_case ) RunBeamCommand.register_subcommand(__snake_case ) DummyDataCommand.register_subcommand(__snake_case ) # Parse args __lowerCAmelCase , __lowerCAmelCase : Any = parser.parse_known_args() if not hasattr(__snake_case ,"func" ): parser.print_help() exit(1 ) __lowerCAmelCase : List[Any] = parse_unknown_args(__snake_case ) # Run __lowerCAmelCase : Union[str, Any] = args.func(__snake_case ,**__snake_case ) service.run() if __name__ == "__main__": main()
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
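# Worked check (illustrative): for a = 7, m = 26 we have gcd(7, 26) == 1 and
# 7 * 15 == 105 == 4 * 26 + 1, so the extended-Euclid loop above returns 15.
assert find_mod_inverse(7, 26) == 15
assert (7 * find_mod_inverse(7, 26)) % 26 == 1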
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = { "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"], "processing_layoutlmv2": ["LayoutLMv2Processor"], "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["LayoutLMv2TokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["LayoutLMv2FeatureExtractor"] __UpperCAmelCase = ["LayoutLMv2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMv2ForQuestionAnswering", "LayoutLMv2ForSequenceClassification", "LayoutLMv2ForTokenClassification", "LayoutLMv2Layer", "LayoutLMv2Model", "LayoutLMv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest

from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image from the ADE20k dataset
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
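# Usage sketch (hypothetical paths): `fire.Fire(minify)` maps positional CLI
# arguments onto the function signature, so the script is run as
#
#   python minify.py path/to/src path/to/dest 5
#
# which writes the first five lines of every file in path/to/src into
# path/to/dest, preserving file names.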
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''', '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''', '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''', '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "funnel" SCREAMING_SNAKE_CASE_ = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self, lowerCAmelCase__=3_0522, lowerCAmelCase__=[4, 4, 4], lowerCAmelCase__=None, lowerCAmelCase__=2, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=64, lowerCAmelCase__=3072, lowerCAmelCase__="gelu_new", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__=None, lowerCAmelCase__=1e-9, lowerCAmelCase__="mean", lowerCAmelCase__="relative_shift", lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]: snake_case_ = vocab_size snake_case_ = block_sizes snake_case_ = [1] * len(lowerCAmelCase__) if block_repeats is None else block_repeats assert len(lowerCAmelCase__) == len( self.block_repeats), "`block_sizes` and `block_repeats` should have the same length." snake_case_ = num_decoder_layers snake_case_ = d_model snake_case_ = n_head snake_case_ = d_head snake_case_ = d_inner snake_case_ = hidden_act snake_case_ = hidden_dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = initializer_range snake_case_ = initializer_std snake_case_ = layer_norm_eps assert pooling_type in [ "mean", "max", ], f'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.' snake_case_ = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.' snake_case_ = attention_type snake_case_ = separate_cls snake_case_ = truncate_seq snake_case_ = pool_q_only super().__init__(**lowerCAmelCase__) @property def a_ ( self) -> Optional[Any]: return sum(self.block_sizes) @num_hidden_layers.setter def a_ ( self, lowerCAmelCase__) -> Tuple: raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. 
Please set `block_sizes`.') @property def a_ ( self) -> Optional[int]: return len(self.block_sizes) @num_blocks.setter def a_ ( self, lowerCAmelCase__) -> List[Any]: raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.')
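# A small sketch of the derived properties above: depth is controlled through
# `block_sizes`, and the usual `num_hidden_layers` attribute is read-only.
#
#   config = FunnelConfig(block_sizes=[2, 2])
#   config.num_hidden_layers      # -> 4, i.e. sum(block_sizes)
#   config.num_blocks             # -> 2, i.e. len(block_sizes)
#   config.num_hidden_layers = 8  # raises NotImplementedError by design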
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
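# Context for the expectations above: map_nested applies the function to every
# leaf of a nested container (list items, dict values, and values inside nested
# lists/dicts), so each expected structure mirrors its input with every integer
# incremented by one. parallel_backend("spark") only swaps the executor used to
# process the leaves; the traversal itself is unchanged.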
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb

from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """
    A CLIP-style preprocessor whose image transforms stay differentiable, so
    gradients can flow from the CLIP loss back into the image latent.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Assemble the intermediate images saved during generation into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frames longer
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add the transform vector to the image latent and decode."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *unused = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
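# Worked example for p = 5, so m = 2**5 - 1 = 31: the loop runs p - 2 = 3 times
# and produces s = 4 -> 14 -> 8 -> 0 (mod 31); s ends at 0, so 31 is correctly
# reported prime. Note the test is only meaningful when p itself is an odd
# prime: lucas_lehmer_test(11) returns False because 2**11 - 1 = 2047 = 23 * 89.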
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

# sieve out the composites below NUM_PRIMES
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of products of primes, one per prime partition of
    `number_to_partition`; the size of the set is the number of partitions.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
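# Worked example for partition above: the prime partitions of 7 are {7}, {5, 2}
# and {3, 2, 2}, with products 7, 10 and 12, so partition(7) == {7, 10, 12} and
# len(partition(7)) == 3. Unique factorization guarantees that distinct prime
# multisets always have distinct products, which is why counting products is the
# same as counting partitions.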
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
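# Two notes on the Graph sketch above: floyd_warshall() is O(n**3) in the number
# of nodes, and dp[i][i] is never initialized to 0, so show_min(i, i) stays at
# math.inf unless a cycle through i (or an explicit self-loop edge) lowers it.
# Callers wanting the conventional all-pairs convention can zero the diagonal
# before running the algorithm:
#
#   for i in range(graph.n):
#       graph.dp[i][i] = 0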
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
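# A sketch of what the dummy-object fallback above gives callers (hypothetical
# session; the checkpoint id is illustrative): the names always import, and the
# dependency error only surfaces when a class is actually used.
#
#   from diffusers import UniDiffuserPipeline        # succeeds without torch
#   UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
#   # -> raises a clear "requires the PyTorch library" error if torch is missing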
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
"""simple docstring""" from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split snake_case_ = datasets.load_iris() snake_case_ = np.array(data["""data"""]) snake_case_ = np.array(data["""target"""]) snake_case_ = data["""target_names"""] snake_case_ = train_test_split(X, y) def _lowerCAmelCase ( lowercase_ , lowercase_ ): return np.linalg.norm(np.array(lowercase_ ) - np.array(lowercase_ ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=5 ): UpperCAmelCase = zip(lowercase_ , lowercase_ ) # List of distances of all points from the point to be classified UpperCAmelCase = [] for data_point in data: UpperCAmelCase = euclidean_distance(data_point[0] , lowercase_ ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. UpperCAmelCase = [i[1] for i in sorted(lowercase_ )[:k]] # Most commonly occurring class among them # is the class into which the point is classified UpperCAmelCase = Counter(lowercase_ ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
"""simple docstring""" from collections import deque class A_ : """simple docstring""" def __init__( self :Any , lowercase_ :str , lowercase_ :int , lowercase_ :int ) -> None: UpperCAmelCase = process_name # process name UpperCAmelCase = arrival_time # arrival time of the process # completion time of finished process or last interrupted time UpperCAmelCase = arrival_time UpperCAmelCase = burst_time # remaining burst time UpperCAmelCase = 0 # total time of the process wait in ready queue UpperCAmelCase = 0 # time from arrival time to completion time class A_ : """simple docstring""" def __init__( self :Any , lowercase_ :int , lowercase_ :list[int] , lowercase_ :deque[Process] , lowercase_ :int , ) -> None: # total number of mlfq's queues UpperCAmelCase = number_of_queues # time slice of queues that round robin algorithm applied UpperCAmelCase = time_slices # unfinished process is in this ready_queue UpperCAmelCase = queue # current time UpperCAmelCase = current_time # finished process is in this sequence queue UpperCAmelCase = deque() def UpperCAmelCase__ ( self :Optional[int] ) -> list[str]: UpperCAmelCase = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def UpperCAmelCase__ ( self :List[str] , lowercase_ :list[Process] ) -> list[int]: UpperCAmelCase = [] for i in range(len(lowercase_ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def UpperCAmelCase__ ( self :List[str] , lowercase_ :list[Process] ) -> list[int]: UpperCAmelCase = [] for i in range(len(lowercase_ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def UpperCAmelCase__ ( self :Dict , lowercase_ :list[Process] ) -> list[int]: UpperCAmelCase = [] for i in range(len(lowercase_ ) ): completion_times.append(queue[i].stop_time ) return completion_times def UpperCAmelCase__ ( self :str , lowercase_ :deque[Process] ) -> list[int]: return [q.burst_time for q in queue] def UpperCAmelCase__ ( self :int , lowercase_ :Process ) -> int: process.waiting_time += self.current_time - process.stop_time return process.waiting_time def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :deque[Process] ) -> deque[Process]: UpperCAmelCase = deque() # sequence deque of finished process while len(lowercase_ ) != 0: UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(lowercase_ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 UpperCAmelCase = 0 # set the process's turnaround time because it is finished UpperCAmelCase = self.current_time - cp.arrival_time # set the completion time UpperCAmelCase = self.current_time # add the process to queue that has finished queue finished.append(lowercase_ ) self.finish_queue.extend(lowercase_ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def UpperCAmelCase__ ( self :Tuple , lowercase_ :deque[Process] , lowercase_ :int ) -> tuple[deque[Process], deque[Process]]: UpperCAmelCase = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(lowercase_ ) ): UpperCAmelCase = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if 
self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(lowercase_ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time UpperCAmelCase = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(lowercase_ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished UpperCAmelCase = 0 # set the finish time UpperCAmelCase = self.current_time # update the process' turnaround time because it is finished UpperCAmelCase = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(lowercase_ ) self.finish_queue.extend(lowercase_ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def UpperCAmelCase__ ( self :Optional[Any] ) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): UpperCAmelCase , UpperCAmelCase = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest snake_case_ = Process("""P1""", 0, 53) snake_case_ = Process("""P2""", 0, 17) snake_case_ = Process("""P3""", 0, 68) snake_case_ = Process("""P4""", 0, 24) snake_case_ = 3 snake_case_ = [17, 25] snake_case_ = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])}) snake_case_ = Process("""P1""", 0, 53) snake_case_ = Process("""P2""", 0, 17) snake_case_ = Process("""P3""", 0, 68) snake_case_ = Process("""P4""", 0, 24) snake_case_ = 3 snake_case_ = [17, 25] snake_case_ = deque([Pa, Pa, Pa, Pa]) snake_case_ = MLFQ(number_of_queues, time_slices, queue, 0) snake_case_ = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( f'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( f'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( f'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
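# A worked trace of the demo above (all processes arrive at t=0, the first two
# queues have time slices 17 and 25, the last queue is FCFS):
#   RR pass 1 (slice 17): P1 53->36, P2 finishes at t=34, P3 68->51, P4 24->7
#   RR pass 2 (slice 25): P1 36->11, P3 51->26, P4 finishes at t=125
#   FCFS:                 P1 finishes at t=136, P3 finishes at t=162
# so the finish sequence is [P2, P4, P1, P3], completion times for P1..P4 are
# [136, 34, 162, 125] and total waiting times are [83, 17, 94, 101].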
def count_divisors(n):
    """Count the divisors of n via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangle number with over five hundred divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
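# Worked example for count_divisors above: 28 = 2**2 * 7, so the loop records a
# multiplicity of 2 for i = 2 (contributing a factor of 2 + 1 = 3), and the
# leftover n = 7 > 1 doubles the count, giving 3 * 2 = 6 divisors:
# 1, 2, 4, 7, 14, 28.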
import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _UpperCAmelCase ( unittest.TestCase ): def a ( self : int ): super().tearDown() gc.collect() def a ( self : Tuple ): __UpperCAmelCase , __UpperCAmelCase = FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-canny''' , from_pt=UpperCamelCase__ , dtype=jnp.bfloataa ) __UpperCAmelCase , __UpperCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , controlnet=UpperCamelCase__ , from_pt=UpperCamelCase__ , dtype=jnp.bfloataa ) __UpperCAmelCase = controlnet_params __UpperCAmelCase = '''bird''' __UpperCAmelCase = jax.device_count() __UpperCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ) __UpperCAmelCase = pipe.prepare_image_inputs([canny_image] * num_samples ) __UpperCAmelCase = jax.random.PRNGKey(0 ) __UpperCAmelCase = jax.random.split(UpperCamelCase__ , jax.device_count() ) __UpperCAmelCase = replicate(UpperCamelCase__ ) __UpperCAmelCase = shard(UpperCamelCase__ ) __UpperCAmelCase = shard(UpperCamelCase__ ) __UpperCAmelCase = pipe( prompt_ids=UpperCamelCase__ , image=UpperCamelCase__ , params=UpperCamelCase__ , prng_seed=UpperCamelCase__ , num_inference_steps=50 , jit=UpperCamelCase__ , ).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) __UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __UpperCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1] __UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __UpperCAmelCase = jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def a ( self : List[str] ): __UpperCAmelCase , __UpperCAmelCase = FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-openpose''' , from_pt=UpperCamelCase__ , dtype=jnp.bfloataa ) __UpperCAmelCase , __UpperCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , controlnet=UpperCamelCase__ , from_pt=UpperCamelCase__ , dtype=jnp.bfloataa ) __UpperCAmelCase = controlnet_params __UpperCAmelCase = '''Chef in the kitchen''' __UpperCAmelCase = jax.device_count() __UpperCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' ) __UpperCAmelCase = pipe.prepare_image_inputs([pose_image] * num_samples ) __UpperCAmelCase = jax.random.PRNGKey(0 ) __UpperCAmelCase = jax.random.split(UpperCamelCase__ , jax.device_count() ) __UpperCAmelCase = replicate(UpperCamelCase__ ) __UpperCAmelCase = shard(UpperCamelCase__ ) __UpperCAmelCase = shard(UpperCamelCase__ ) __UpperCAmelCase = pipe( prompt_ids=UpperCamelCase__ , image=UpperCamelCase__ , params=UpperCamelCase__ , prng_seed=UpperCamelCase__ , num_inference_steps=50 , jit=UpperCamelCase__ , ).images assert images.shape == 
(jax.device_count(), 1, 7_68, 5_12, 3) __UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __UpperCAmelCase = images[0, 2_53:2_56, 2_53:2_56, -1] __UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __UpperCAmelCase = jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
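# A small usage sketch for the module above (an illustrative aside, not part
# of the original file): radix_sort sorts non-negative integers in place and
# also returns the list, so the call below both mutates and yields the result.
#
# example = [170, 45, 75, 90, 802, 24, 2, 66]
# assert radix_sort(example) == [2, 24, 45, 66, 75, 90, 170, 802]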
"""simple docstring""" from __future__ import annotations from typing import Any def __lowerCAmelCase (_UpperCamelCase ): create_state_space_tree(_UpperCamelCase , [] , 0 ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if index == len(_UpperCamelCase ): print(_UpperCamelCase ) return create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 ) current_subsequence.append(sequence[index] ) create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 ) current_subsequence.pop() if __name__ == "__main__": lowerCamelCase__ = [3, 1, 2, 4] generate_all_subsequences(seq) seq.clear() seq.extend(["""A""", """B""", """C"""]) generate_all_subsequences(seq)
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2 ** power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
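# Equivalent one-liner for reference (an aside, not in the original file):
# the same digit sum can be computed directly from the decimal string.
#
# assert solution(15) == sum(int(digit) for digit in str(2**15))  # 26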
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
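# Worked check for the functions above (an illustrative aside): with the
# classic CLRS price table [1, 5, 8, 9], the best cut of a rod of length 4 is
# two pieces of length 2 (5 + 5 = 10), which beats selling it whole for 9.
#
# assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10
# assert top_down_cut_rod(4, [1, 5, 8, 9]) == 10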
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the suffix so the loop terminates on a collision
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
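# Hedged usage sketch (illustrative, not part of the original module): a
# subclass pins the prefix and defaults, after which shortname() encodes only
# the non-default hyperparameters and parse_repr() recovers the full dict.
#
# class MyNamer(TrialShortNamer):
#     PREFIX = "hp"
#     DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
# name = MyNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
# # -> something like "hp_lr0.0001"; batch_size is omitted as the default
# assert MyNamer.parse_repr(name)["batch_size"] == 32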
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar everywhere except on the main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
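# Hedged usage sketch (illustrative, not part of the original module): in a
# multi-process Accelerate run the wrapper is called like tqdm itself, with
# main_process_only as the leading argument.
#
# for batch in tqdm(True, dataloader):  # progress bar only on local rank 0
#     ...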
"""simple docstring""" from __future__ import annotations def lowercase__ ( lowercase_ ) -> bool: """simple docstring""" _UpperCamelCase : Optional[int] = str(lowercase_ ) return n == n[::-1] def lowercase__ ( lowercase_ = 1_000_000 ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase : List[Any] = 0 for i in range(1 ,lowercase_ ): if is_palindrome(lowercase_ ) and is_palindrome(bin(lowercase_ ).split("b" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
"""simple docstring""" import torch from transformers import AutoModel class __SCREAMING_SNAKE_CASE ( torch.nn.Module ): '''simple docstring''' def __init__( self : Dict , __a : Tuple="sayef/fsner-bert-base-uncased" ) -> Dict: super(__a , self ).__init__() _UpperCamelCase : Optional[Any] = AutoModel.from_pretrained(__a , return_dict=__a ) _UpperCamelCase : str = torch.nn.CosineSimilarity(3 , 1e-0_8 ) _UpperCamelCase : List[str] = torch.nn.Softmax(dim=1 ) def __SCREAMING_SNAKE_CASE ( self : int , **__a : Tuple ) -> Optional[Any]: return self.bert(**__a ).last_hidden_state def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Optional[Any] ) -> Optional[int]: return token_embeddings.sum(2 , keepdim=__a ) def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : List[Any] , __a : Tuple=1 ) -> List[Any]: return self.softmax(T * self.cos(__a , __a ) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[str] , __a : Dict ) -> Union[str, Any]: _UpperCamelCase : str = W_supports["sizes"].tolist() _UpperCamelCase : Any = W_supports["start_token_id"].item() _UpperCamelCase : Optional[Any] = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _UpperCamelCase : str = self.BERT(**__a ) _UpperCamelCase : int = self.BERT(**__a ) _UpperCamelCase : int = None _UpperCamelCase : Optional[int] = None _UpperCamelCase : List[Any] = W_supports["input_ids"] == start_token_id _UpperCamelCase : Optional[int] = W_supports["input_ids"] == end_token_id for i, size in enumerate(__a ): if i == 0: _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Any = support_sizes[i - 1] _UpperCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]] _UpperCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]] _UpperCamelCase : List[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _UpperCamelCase : Any = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _UpperCamelCase : Any = torch.vstack((p_starts, p_start) ) _UpperCamelCase : Any = torch.vstack((p_ends, p_end) ) else: _UpperCamelCase : Optional[Any] = p_start _UpperCamelCase : str = p_end return p_starts, p_ends
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the top-3 checkpoints ranked by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)

        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute the digits of pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # the series yields roughly 14 digits per term
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
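# The series summed above is the Chudnovsky formula:
#
#     1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k)
#                            / ((3k)! * (k!)^3 * 640320^(3k + 3/2))
#
# Note that 426880 * sqrt(10005) = 640320^(3/2) / 12, which is where
# `constant_term` comes from.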
"""
Project Euler Problem 21: https://projecteuler.net/problem=21

Evaluate the sum of all the amicable numbers under 10000.
"""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of `n`."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
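# Worked example (from the problem statement): the proper divisors of 220 sum
# to 284 and those of 284 sum to 220, so sum_of_divisors(sum_of_divisors(220))
# == 220 while sum_of_divisors(220) != 220 -- an amicable pair.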
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Query the index, enlarging the request until enough candidates are returned.
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate the downloaded bytes are an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
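# A hypothetical invocation sketch (assumes the `clip-retrieval` package is
# installed and the LAION KNN service is reachable; the paths are placeholders):
#
#     python retrieve.py --class_prompt "cat" --class_data_dir real_reg/cat --num_class_images 200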
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def build_tree():
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Level order traversal that prints each level of the tree on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
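# Worked example: for the tree
#
#         1
#        / \
#       2   3
#      / \
#     4   5
#
# pre_order prints 1,2,4,5,3, in_order prints 4,2,5,1,3, post_order prints
# 4,5,2,3,1 and level_order prints 1,2,3,4,5.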
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
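# Usage note: with `_LazyModule`, `from transformers import YolosModel` resolves
# `modeling_yolos` (and hence torch) only on first attribute access, so a plain
# `import transformers` stays cheap even when the optional backends are installed.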
"""IndicGLUE benchmark metric."""

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score

import datasets


_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
"""

_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""

_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'precision@10': 1.0}

"""


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = ['image_processor', 'tokenizer'] _a = 'CLIPImageProcessor' _a = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__( self : List[str], lowerCamelCase : Dict=None, lowerCamelCase : str=None, **lowerCamelCase : int )-> Tuple: lowerCamelCase__ : List[str] =None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCamelCase, ) lowerCamelCase__ : Optional[Any] =kwargs.pop('''feature_extractor''' ) lowerCamelCase__ : Optional[int] =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCamelCase, lowerCamelCase ) def __call__( self : Any, lowerCamelCase : Tuple=None, lowerCamelCase : Any=None, lowerCamelCase : Any=None, **lowerCamelCase : List[Any] )-> Optional[int]: if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: lowerCamelCase__ : int =self.tokenizer(lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase ) if images is not None: lowerCamelCase__ : List[str] =self.image_processor(lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase ) if text is not None and images is not None: lowerCamelCase__ : List[Any] =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase ), tensor_type=lowerCamelCase ) def snake_case ( self : str, *lowerCamelCase : Any, **lowerCamelCase : List[str] )-> int: return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase ) def snake_case ( self : List[str], *lowerCamelCase : Tuple, **lowerCamelCase : Optional[int] )-> Any: return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase ) @property def snake_case ( self : List[str] )-> Optional[int]: lowerCamelCase__ : Any =self.tokenizer.model_input_names lowerCamelCase__ : List[Any] =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" if "model" in orig_key: lowerCamelCase__ : Optional[int] =orig_key.replace('''model.''' , '''''' ) if "norm1" in orig_key: lowerCamelCase__ : Union[str, Any] =orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' ) if "norm2" in orig_key: lowerCamelCase__ : List[Any] =orig_key.replace('''norm2''' , '''output.LayerNorm''' ) if "norm" in orig_key: lowerCamelCase__ : List[str] =orig_key.replace('''norm''' , '''LayerNorm''' ) if "transformer" in orig_key: lowerCamelCase__ : str =orig_key.split('''.''' )[0].split('''_''' )[-1] lowerCamelCase__ : Dict =orig_key.replace(f'''transformer_{layer_num}''' , f'''encoder.layer.{layer_num}''' ) if "mha.attn" in orig_key: lowerCamelCase__ : Union[str, Any] =orig_key.replace('''mha.attn''' , '''attention.self''' ) if "mha" in orig_key: lowerCamelCase__ : str =orig_key.replace('''mha''' , '''attention''' ) if "W_q" in orig_key: lowerCamelCase__ : Union[str, Any] =orig_key.replace('''W_q''' , '''self.query''' ) if "W_k" in orig_key: lowerCamelCase__ : Optional[int] =orig_key.replace('''W_k''' , '''self.key''' ) if "W_v" in orig_key: lowerCamelCase__ : List[str] =orig_key.replace('''W_v''' , '''self.value''' ) if "ff1" in orig_key: lowerCamelCase__ : Dict =orig_key.replace('''ff1''' , '''intermediate.dense''' ) if "ff2" in orig_key: lowerCamelCase__ : Union[str, Any] =orig_key.replace('''ff2''' , '''output.dense''' ) if "ff" in orig_key: lowerCamelCase__ : str =orig_key.replace('''ff''' , '''output.dense''' ) if "mlm_class" in orig_key: lowerCamelCase__ : Tuple =orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' ) if "mlm" in orig_key: lowerCamelCase__ : Optional[int] =orig_key.replace('''mlm''' , '''cls.predictions.transform''' ) if "cls" not in orig_key: lowerCamelCase__ : Optional[int] ='''yoso.''' + orig_key return orig_key def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Any ): """simple docstring""" for key in orig_state_dict.copy().keys(): lowerCamelCase__ : Optional[Any] =orig_state_dict.pop(__lowerCamelCase ) if ("pooler" in key) or ("sen_class" in key): continue else: lowerCamelCase__ : List[str] =val lowerCamelCase__ : Optional[int] =orig_state_dict['''cls.predictions.decoder.bias'''] lowerCamelCase__ : str =torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2 return orig_state_dict def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict'''] lowerCamelCase__ : List[Any] =YosoConfig.from_json_file(__lowerCamelCase ) lowerCamelCase__ : List[str] =YosoForMaskedLM(__lowerCamelCase ) lowerCamelCase__ : Tuple =convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase ) print(model.load_state_dict(__lowerCamelCase ) ) model.eval() model.save_pretrained(__lowerCamelCase ) print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) if __name__ == "__main__": _lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for YOSO model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowercase : Optional[Any] = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
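# A hypothetical invocation sketch (the script and file names below are
# placeholders, not the repository's actual paths):
#
#     python convert_yoso_checkpoint.py \
#         --pytorch_model_path yoso.bin \
#         --config_file yoso_config.json \
#         --pytorch_dump_path ./yoso-converted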
"""Feature extractor class for FLAVA (deprecated alias of the image processor)."""

import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""
Project Euler Problem 22: https://projecteuler.net/problem=22

Compute the total of all the name scores in the file p022_names.txt.
"""
import os


def solution() -> int:
    total_score = 0
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
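# Worked example (from the problem statement): COLIN has an alphabetical value
# of 3 + 15 + 12 + 9 + 14 = 53; at position 938 in the sorted list it scores
# 938 * 53 = 49714.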
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
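# A minimal usage sketch (assumes `transformers` is installed). Each
# `attention_types` entry `[patterns, n]` repeats `patterns` n times, so the
# expanded layer list must match `num_layers`:
#
#     from transformers import GPTNeoConfig
#
#     config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
#     assert config.attention_layers == ["global", "local", "global", "local"]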
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}

# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)


def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"

        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
24
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer snake_case_ : List[Any] = logging.get_logger(__name__) class lowercase__ ( lowercase ): lowercase__ = """AutoTokenizer""" lowercase__ = ["""tokenizer"""] lowercase__ = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self : List[str] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Tuple=None ): '''simple docstring''' super().__init__(lowerCamelCase__ ) _UpperCamelCase : Dict = speaker_embeddings @classmethod def UpperCamelCase_ ( cls : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : str="speaker_embeddings_path.json" ,**lowerCamelCase__ : Optional[Any] ): '''simple docstring''' if speaker_embeddings_dict_path is not None: _UpperCamelCase : Optional[Any] = get_file_from_repo( lowerCamelCase__ ,lowerCamelCase__ ,subfolder=kwargs.pop('subfolder' ,lowerCamelCase__ ) ,cache_dir=kwargs.pop('cache_dir' ,lowerCamelCase__ ) ,force_download=kwargs.pop('force_download' ,lowerCamelCase__ ) ,proxies=kwargs.pop('proxies' ,lowerCamelCase__ ) ,resume_download=kwargs.pop('resume_download' ,lowerCamelCase__ ) ,local_files_only=kwargs.pop('local_files_only' ,lowerCamelCase__ ) ,use_auth_token=kwargs.pop('use_auth_token' ,lowerCamelCase__ ) ,revision=kwargs.pop('revision' ,lowerCamelCase__ ) ,) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(lowerCamelCase__ ,lowerCamelCase__ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' 
) _UpperCamelCase : Union[str, Any] = None else: with open(lowerCamelCase__ ) as speaker_embeddings_json: _UpperCamelCase : Optional[int] = json.load(lowerCamelCase__ ) else: _UpperCamelCase : Tuple = None _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ ) return cls(tokenizer=lowerCamelCase__ ,speaker_embeddings=lowerCamelCase__ ) def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int="speaker_embeddings_path.json" ,lowerCamelCase__ : Dict="speaker_embeddings" ,lowerCamelCase__ : bool = False ,**lowerCamelCase__ : Tuple ,): '''simple docstring''' if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowerCamelCase__ ,lowerCamelCase__ ,'v2' ) ,exist_ok=lowerCamelCase__ ) _UpperCamelCase : Tuple = {} _UpperCamelCase : Optional[Any] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": _UpperCamelCase : Any = self._load_voice_preset(lowerCamelCase__ ) _UpperCamelCase : Union[str, Any] = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['repo_or_path'] ,lowerCamelCase__ ,F'{prompt_key}_{key}' ) ,voice_preset[key] ,allow_pickle=lowerCamelCase__ ,) _UpperCamelCase : List[str] = os.path.join(lowerCamelCase__ ,F'{prompt_key}_{key}.npy' ) _UpperCamelCase : str = tmp_dict with open(os.path.join(lowerCamelCase__ ,lowerCamelCase__ ) ,'w' ) as fp: json.dump(lowerCamelCase__ ,lowerCamelCase__ ) super().save_pretrained(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ) def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str = None ,**lowerCamelCase__ : Dict ): '''simple docstring''' _UpperCamelCase : Tuple = self.speaker_embeddings[voice_preset] _UpperCamelCase : Union[str, Any] = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' ) _UpperCamelCase : Dict = get_file_from_repo( self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,lowerCamelCase__ ) ,cache_dir=kwargs.pop('cache_dir' ,lowerCamelCase__ ) ,force_download=kwargs.pop('force_download' ,lowerCamelCase__ ) ,proxies=kwargs.pop('proxies' ,lowerCamelCase__ ) ,resume_download=kwargs.pop('resume_download' ,lowerCamelCase__ ) ,local_files_only=kwargs.pop('local_files_only' ,lowerCamelCase__ ) ,use_auth_token=kwargs.pop('use_auth_token' ,lowerCamelCase__ ) ,revision=kwargs.pop('revision' ,lowerCamelCase__ ) ,) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) _UpperCamelCase : List[str] = np.load(lowerCamelCase__ ) return voice_preset_dict def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Optional[dict] = None ): '''simple docstring''' for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] ,np.ndarray ): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' 
) def __call__( self : Any ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Any="pt" ,lowerCamelCase__ : Dict=256 ,lowerCamelCase__ : int=False ,lowerCamelCase__ : int=True ,lowerCamelCase__ : List[str]=False ,**lowerCamelCase__ : Union[str, Any] ,): '''simple docstring''' if voice_preset is not None and not isinstance(lowerCamelCase__ ,lowerCamelCase__ ): if ( isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): _UpperCamelCase : Optional[int] = self._load_voice_preset(lowerCamelCase__ ) else: if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and not voice_preset.endswith('.npz' ): _UpperCamelCase : Tuple = voice_preset + '.npz' _UpperCamelCase : str = np.load(lowerCamelCase__ ) if voice_preset is not None: self._validate_voice_preset_dict(lowerCamelCase__ ,**lowerCamelCase__ ) _UpperCamelCase : Union[str, Any] = BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ ) _UpperCamelCase : Union[str, Any] = self.tokenizer( lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,padding='max_length' ,max_length=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,) if voice_preset is not None: _UpperCamelCase : Optional[Any] = voice_preset return encoded_text
83
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : List[Any] = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = '''roberta''' def __init__(self : Union[str, Any] , _UpperCAmelCase : Union[str, Any]=5_0265 , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Tuple=3072 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : int=1E-1_2 , _UpperCAmelCase : str=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : List[Any]="absolute" , _UpperCAmelCase : Any=True , _UpperCAmelCase : Any=None , **_UpperCAmelCase : str , ) -> int: """simple docstring""" super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = classifier_dropout class A ( UpperCAmelCase__ ): '''simple docstring''' @property def lowerCamelCase__ (self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase__ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
369
A : Tuple = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def UpperCamelCase ( ) -> None: """simple docstring""" lowercase__ = input("""Enter message: """ ) lowercase__ = input("""Enter key [alphanumeric]: """ ) lowercase__ = input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): lowercase__ = """encrypt""" lowercase__ = encrypt_message(__magic_name__ , __magic_name__ ) elif mode.lower().startswith("""d""" ): lowercase__ = """decrypt""" lowercase__ = decrypt_message(__magic_name__ , __magic_name__ ) print(f'''\n{mode.title()}ed message:''' ) print(__magic_name__ ) def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str ) -> str: """simple docstring""" return translate_message(__magic_name__ , __magic_name__ , """encrypt""" ) def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str ) -> str: """simple docstring""" return translate_message(__magic_name__ , __magic_name__ , """decrypt""" ) def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : str ) -> str: """simple docstring""" lowercase__ = [] lowercase__ = 0 lowercase__ = key.upper() for symbol in message: lowercase__ = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(__magic_name__ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(__magic_name__ ): lowercase__ = 0 else: translated.append(__magic_name__ ) return "".join(__magic_name__ ) if __name__ == "__main__": main()
146
0
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCAmelCase_ : def __init__( self, __a, __a=2, __a=3, __a=4, __a=2, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=36, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=6, __a=6, __a=3, __a=4, __a=None, __a=1000, ): '''simple docstring''' _lowerCAmelCase : Dict = parent _lowerCAmelCase : str = batch_size _lowerCAmelCase : List[Any] = num_channels _lowerCAmelCase : List[Any] = image_size _lowerCAmelCase : Dict = patch_size _lowerCAmelCase : List[Any] = is_training _lowerCAmelCase : Optional[Any] = use_input_mask _lowerCAmelCase : Union[str, Any] = use_token_type_ids _lowerCAmelCase : Optional[Any] = use_labels _lowerCAmelCase : Union[str, Any] = vocab_size _lowerCAmelCase : Tuple = hidden_size _lowerCAmelCase : Any = num_hidden_layers _lowerCAmelCase : Dict = num_attention_heads _lowerCAmelCase : List[str] = intermediate_size _lowerCAmelCase : Tuple = hidden_act _lowerCAmelCase : Tuple = hidden_dropout_prob _lowerCAmelCase : Dict = attention_probs_dropout_prob _lowerCAmelCase : Optional[int] = max_position_embeddings _lowerCAmelCase : Optional[Any] = type_vocab_size _lowerCAmelCase : str = type_sequence_label_size _lowerCAmelCase : Optional[Any] = initializer_range _lowerCAmelCase : Tuple = coordinate_size _lowerCAmelCase : Tuple = shape_size _lowerCAmelCase : int = num_labels _lowerCAmelCase : List[str] = num_choices _lowerCAmelCase : Union[str, Any] = scope _lowerCAmelCase : Tuple = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _lowerCAmelCase : int = text_seq_length _lowerCAmelCase : Dict = (image_size // patch_size) ** 2 + 1 _lowerCAmelCase : int = self.text_seq_length + self.image_seq_length def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : int = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size) _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox) _lowerCAmelCase : Optional[Any] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: _lowerCAmelCase : Dict = bbox[i, j, 3] _lowerCAmelCase : Any = bbox[i, j, 1] _lowerCAmelCase : Union[str, Any] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: _lowerCAmelCase : Optional[Any] = bbox[i, j, 2] _lowerCAmelCase : str = bbox[i, j, 0] _lowerCAmelCase : Any = tmp_coordinate _lowerCAmelCase : str 
= tf.constant(__a) _lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _lowerCAmelCase : Dict = None if self.use_input_mask: _lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length]) _lowerCAmelCase : Any = None if self.use_token_type_ids: _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size) _lowerCAmelCase : str = None _lowerCAmelCase : Optional[int] = None if self.use_labels: _lowerCAmelCase : str = ids_tensor([self.batch_size], self.type_sequence_label_size) _lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels) _lowerCAmelCase : str = LayoutLMvaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def snake_case__ ( self, __a, __a, __a, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = TFLayoutLMvaModel(config=__a) # text + image _lowerCAmelCase : List[Any] = model(__a, pixel_values=__a, training=__a) _lowerCAmelCase : Any = model( __a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, training=__a, ) _lowerCAmelCase : int = model(__a, bbox=__a, pixel_values=__a, training=__a) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # text only _lowerCAmelCase : Optional[int] = model(__a, training=__a) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)) # image only _lowerCAmelCase : Union[str, Any] = model({"pixel_values": pixel_values}, training=__a) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)) def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : int = self.num_labels _lowerCAmelCase : int = TFLayoutLMvaForSequenceClassification(config=__a) _lowerCAmelCase : Optional[Any] = model( __a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, labels=__a, training=__a, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : Dict = self.num_labels _lowerCAmelCase : str = TFLayoutLMvaForTokenClassification(config=__a) _lowerCAmelCase : Optional[Any] = model( __a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, labels=__a, training=__a, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels)) def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : str = 2 _lowerCAmelCase : Tuple = TFLayoutLMvaForQuestionAnswering(config=__a) _lowerCAmelCase : List[Any] = model( __a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, 
end_positions=__a, training=__a, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Any = self.prepare_config_and_inputs() ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Any = config_and_inputs _lowerCAmelCase : str = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class UpperCAmelCase_ ( a , a , unittest.TestCase): lowerCamelCase__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCamelCase__ = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def snake_case__ ( self, __a, __a, __a, __a, __a): '''simple docstring''' return True def snake_case__ ( self, __a, __a, __a=False): '''simple docstring''' _lowerCAmelCase : Any = copy.deepcopy(__a) if model_class in get_values(__a): _lowerCAmelCase : Any = { k: tf.tile(tf.expand_dims(__a, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(__a, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__a): _lowerCAmelCase : Any = tf.ones(self.model_tester.batch_size, dtype=tf.intaa) elif model_class in get_values(__a): _lowerCAmelCase : Any = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa) _lowerCAmelCase : Tuple = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa) elif model_class in get_values(__a): _lowerCAmelCase : Any = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa) elif model_class in get_values(__a): _lowerCAmelCase : Union[str, Any] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.intaa) return inputs_dict def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = TFLayoutLMvaModelTester(self) _lowerCAmelCase : Dict = ConfigTester(self, config_class=__a, hidden_size=37) def snake_case__ ( self): '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : List[str] = model_class(__a) if getattr(__a, "hf_compute_loss", __a): # The number of elements in the loss should be the same as the number of elements in the label _lowerCAmelCase : int = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a) _lowerCAmelCase : int = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=__a)[0] ] _lowerCAmelCase : str = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs _lowerCAmelCase : str = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a) _lowerCAmelCase : Optional[int] = prepared_for_class.pop("input_ids") _lowerCAmelCase : Optional[int] = model(__a, **__a)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute 
the loss when we mask some positions _lowerCAmelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a) _lowerCAmelCase : Union[str, Any] = prepared_for_class.pop("input_ids") if "labels" in prepared_for_class: _lowerCAmelCase : List[str] = prepared_for_class["labels"].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: _lowerCAmelCase : Optional[int] = -100 _lowerCAmelCase : Optional[Any] = tf.convert_to_tensor(__a) _lowerCAmelCase : Any = model(__a, **__a)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) # Test that model correctly compute the loss with a dict _lowerCAmelCase : Tuple = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a) _lowerCAmelCase : Optional[int] = model(__a)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss with a tuple _lowerCAmelCase : Tuple = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a) # Get keys that were added with the _prepare_for_class function _lowerCAmelCase : int = prepared_for_class.keys() - inputs_dict.keys() _lowerCAmelCase : str = inspect.signature(model.call).parameters _lowerCAmelCase : int = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple _lowerCAmelCase : Optional[int] = {0: "input_ids"} for label_key in label_keys: _lowerCAmelCase : str = signature_names.index(__a) _lowerCAmelCase : Dict = label_key _lowerCAmelCase : Optional[int] = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple _lowerCAmelCase : Any = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value in sorted_tuple_index_mapping: _lowerCAmelCase : Optional[Any] = prepared_for_class[value] _lowerCAmelCase : Union[str, Any] = tuple(__a) # Send to model _lowerCAmelCase : str = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) def snake_case__ ( self): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__a, __a, __a, __a, __a, __a) def snake_case__ ( self): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase : List[str] = type self.model_tester.create_and_check_model(__a, __a, __a, __a, __a, __a) def snake_case__ ( self): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __a, __a, __a, __a, __a, __a, __a) def snake_case__ ( self): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , 
( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __a, __a, __a, __a, __a, __a, __a) def snake_case__ ( self): '''simple docstring''' ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __a, __a, __a, __a, __a, __a, __a) @slow def snake_case__ ( self): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : str = TFLayoutLMvaModel.from_pretrained(__a) self.assertIsNotNone(__a) def A ( ): '''simple docstring''' _lowerCAmelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf class UpperCAmelCase_ ( unittest.TestCase): @cached_property def snake_case__ ( self): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__a) if is_vision_available() else None @slow def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[str] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base") _lowerCAmelCase : str = self.default_image_processor _lowerCAmelCase : List[Any] = prepare_img() _lowerCAmelCase : Union[str, Any] = image_processor(images=__a, return_tensors="tf").pixel_values _lowerCAmelCase : List[str] = tf.constant([[1, 2]]) _lowerCAmelCase : Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0) # forward pass _lowerCAmelCase : Optional[Any] = model(input_ids=__a, bbox=__a, pixel_values=__a, training=__a) # verify the logits _lowerCAmelCase : Any = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape, __a) _lowerCAmelCase : List[Any] = tf.constant( [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]]) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], __a, atol=1E-4))
36
"""simple docstring""" def __magic_name__ ( __snake_case : list ) -> list: if len(__snake_case ) < 2: return collection def circle_sort_util(__snake_case : list , __snake_case : int , __snake_case : int ) -> bool: lowercase : List[Any] = False if low == high: return swapped lowercase : Union[str, Any] = low lowercase : str = high while left < right: if collection[left] > collection[right]: lowercase , lowercase : Optional[Any] = ( collection[right], collection[left], ) lowercase : Tuple = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowercase , lowercase : str = ( collection[right + 1], collection[left], ) lowercase : Union[str, Any] = True lowercase : Any = low + int((high - low) / 2 ) lowercase : Tuple = circle_sort_util(__snake_case , __snake_case , __snake_case ) lowercase : List[Any] = circle_sort_util(__snake_case , mid + 1 , __snake_case ) return swapped or left_swap or right_swap lowercase : int = True while is_not_sorted is True: lowercase : int = circle_sort_util(__snake_case , 0 , len(__snake_case ) - 1 ) return collection if __name__ == "__main__": _A : str = input("""Enter numbers separated by a comma:\n""").strip() _A : Dict = [int(item) for item in user_input.split(""",""")] print(circle_sort(unsorted))
202
0
'''simple docstring''' import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( A , A , A ) -> Tuple: lowerCAmelCase__ = WavaVecaForSequenceClassification.from_pretrained(A , config=A ) lowerCAmelCase__ = downstream_dict['''projector.weight'''] lowerCAmelCase__ = downstream_dict['''projector.bias'''] lowerCAmelCase__ = downstream_dict['''model.post_net.linear.weight'''] lowerCAmelCase__ = downstream_dict['''model.post_net.linear.bias'''] return model def _snake_case ( A , A , A ) -> List[str]: lowerCAmelCase__ = WavaVecaForAudioFrameClassification.from_pretrained(A , config=A ) lowerCAmelCase__ = downstream_dict['''model.linear.weight'''] lowerCAmelCase__ = downstream_dict['''model.linear.bias'''] return model def _snake_case ( A , A , A ) -> str: lowerCAmelCase__ = WavaVecaForXVector.from_pretrained(A , config=A ) lowerCAmelCase__ = downstream_dict['''connector.weight'''] lowerCAmelCase__ = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): lowerCAmelCase__ = downstream_dict[ F"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] lowerCAmelCase__ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] lowerCAmelCase__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] lowerCAmelCase__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] lowerCAmelCase__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] lowerCAmelCase__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] lowerCAmelCase__ = downstream_dict['''objective.W'''] return model @torch.no_grad() def _snake_case ( A , A , A , A ) -> int: lowerCAmelCase__ = torch.load(A , map_location='''cpu''' ) lowerCAmelCase__ = checkpoint['''Downstream'''] lowerCAmelCase__ = WavaVecaConfig.from_pretrained(A ) lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained( A , return_attention_mask=A , do_normalize=A ) lowerCAmelCase__ = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): lowerCAmelCase__ = convert_classification(A , A , A ) elif arch.endswith('''ForAudioFrameClassification''' ): lowerCAmelCase__ = convert_diarization(A , A , A ) elif arch.endswith('''ForXVector''' ): lowerCAmelCase__ = convert_xvector(A , A , A ) else: raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: lowerCAmelCase__ = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(A ) hf_model.save_pretrained(A ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') __UpperCAmelCase = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
363
'''simple docstring''' def _snake_case ( A = 10 ) -> str: if not isinstance(A , A ) or n < 0: raise ValueError('''Invalid input''' ) lowerCAmelCase__ = 10**n lowerCAmelCase__ = 28433 * (pow(2 , 7830457 , A )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f"""{solution(10) = }""")
228
0
from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer snake_case_ = logging.get_logger(__name__) snake_case_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case_ = { '''vocab_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json''' }, '''merges_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt''' }, } snake_case_ = {'''allegro/herbert-base-cased''': 514} snake_case_ = {} class SCREAMING_SNAKE_CASE__ (__snake_case ): __lowerCamelCase : Tuple = VOCAB_FILES_NAMES __lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION __lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : List[Any] = HerbertTokenizer def __init__( self , a=None , a=None , a=None , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a="</s>" , **a , ): super().__init__( a , a , tokenizer_file=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , sep_token=a , **a , ) def snake_case_ ( self , a , a = None): lowercase__ : Optional[int] = [self.cls_token_id] lowercase__ : List[str] = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def snake_case_ ( self , a , a = None , a = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a , token_ids_a=a , already_has_special_tokens=a) if token_ids_a is None: return [1] + ([0] * len(a)) + [1] return [1] + ([0] * len(a)) + [1] + ([0] * len(a)) + [1] def snake_case_ ( self , a , a = None): lowercase__ : str = [self.sep_token_id] lowercase__ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def snake_case_ ( self , a , a = None): lowercase__ : List[Any] = self._tokenizer.model.save(a , name=a) return tuple(a)
214
import unittest from transformers import DonutProcessor snake_case_ = '''naver-clova-ix/donut-base''' class SCREAMING_SNAKE_CASE__ (unittest.TestCase ): def snake_case_ ( self): lowercase__ : Dict = DonutProcessor.from_pretrained(a) def snake_case_ ( self): lowercase__ : Tuple = { 'name': 'John Doe', 'age': '99', 'city': 'Atlanta', 'state': 'GA', 'zip': '30301', 'phone': '123-4567', 'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}], } lowercase__ : Tuple = ( '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>' '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>' '<s_nicknames><s_nickname>Johnny</s_nickname>' '<sep/><s_nickname>JD</s_nickname></s_nicknames>' ) lowercase__ : str = self.processor.tokenajson(a) self.assertDictEqual(a , a)
214
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class a__ : _lowerCamelCase = MBartConfig _lowerCamelCase = {} _lowerCamelCase = 'gelu' def __init__( self : Union[str, Any], lowerCAmelCase : Tuple, lowerCAmelCase : Optional[Any]=13, lowerCAmelCase : Optional[int]=7, lowerCAmelCase : Optional[Any]=True, lowerCAmelCase : str=False, lowerCAmelCase : int=99, lowerCAmelCase : List[Any]=32, lowerCAmelCase : List[str]=2, lowerCAmelCase : Tuple=4, lowerCAmelCase : List[str]=37, lowerCAmelCase : Dict=0.1, lowerCAmelCase : List[str]=0.1, lowerCAmelCase : str=20, lowerCAmelCase : Any=2, lowerCAmelCase : List[Any]=1, lowerCAmelCase : Optional[Any]=0, ) -> Optional[Any]: lowercase : Any = parent lowercase : Dict = batch_size lowercase : List[str] = seq_length lowercase : str = is_training lowercase : Union[str, Any] = use_labels lowercase : str = vocab_size lowercase : List[Any] = hidden_size lowercase : Optional[Any] = num_hidden_layers lowercase : Optional[int] = num_attention_heads lowercase : Dict = intermediate_size lowercase : int = hidden_dropout_prob lowercase : Tuple = attention_probs_dropout_prob lowercase : Tuple = max_position_embeddings lowercase : Tuple = eos_token_id lowercase : Any = pad_token_id lowercase : int = bos_token_id def lowercase ( self : Optional[Any] ) -> Optional[int]: lowercase : Any = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) lowercase : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) lowercase : Tuple = tf.concat([input_ids, eos_tensor], axis=1 ) lowercase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowercase : List[Any] = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) lowercase : Union[str, Any] = prepare_mbart_inputs_dict(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) return config, inputs_dict def lowercase ( self : str, lowerCAmelCase : Any, lowerCAmelCase : Tuple ) -> List[Any]: lowercase : List[str] = TFMBartModel(config=lowerCAmelCase ).get_decoder() lowercase : int = inputs_dict['input_ids'] lowercase : Any = input_ids[:1, :] lowercase : Any = inputs_dict['attention_mask'][:1, :] lowercase : List[str] = inputs_dict['head_mask'] lowercase : Tuple = 1 # first forward pass lowercase : Optional[Any] = model(lowerCAmelCase, attention_mask=lowerCAmelCase, head_mask=lowerCAmelCase, use_cache=lowerCAmelCase ) lowercase , lowercase : Tuple = 
outputs.to_tuple() lowercase : Dict = past_key_values[1] def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: lowercase : List[str] = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase : int = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase ): _lowerCamelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () _lowerCamelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else () _lowerCamelCase = ( { 'conversational': TFMBartForConditionalGeneration, 'feature-extraction': TFMBartModel, 'summarization': TFMBartForConditionalGeneration, 'text2text-generation': TFMBartForConditionalGeneration, 'translation': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) _lowerCamelCase = True _lowerCamelCase = False _lowerCamelCase = False def lowercase ( self : Dict, lowerCAmelCase : Optional[Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Optional[int], lowerCAmelCase : int, lowerCAmelCase : Dict ) -> List[str]: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def lowercase ( self : Union[str, Any] ) -> int: lowercase : Any = TFMBartModelTester(self ) lowercase : Optional[int] = ConfigTester(self, config_class=lowerCAmelCase ) def lowercase ( self : List[Any] ) -> Union[str, Any]: self.config_tester.run_common_tests() def lowercase ( self : str ) -> Dict: lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class a__ ( unittest.TestCase ): _lowerCamelCase = [ ' UN Chief Says There Is No Military Solution in Syria', ] _lowerCamelCase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', ] _lowerCamelCase = 'facebook/mbart-large-en-ro' @cached_property def lowercase ( self : List[str] ) -> Optional[int]: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowercase ( self : List[Any] ) -> Union[str, Any]: lowercase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowercase ( self : int, **lowerCAmelCase : Tuple ) -> Tuple: lowercase : Optional[Any] = self.translate_src_text(**lowerCAmelCase ) self.assertListEqual(self.expected_text, lowerCAmelCase ) def lowercase ( self : Dict, **lowerCAmelCase : Any ) -> List[Any]: lowercase : Optional[Any] = self.tokenizer(self.src_text, **lowerCAmelCase, return_tensors='tf' ) lowercase : Any = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 ) lowercase : str = self.tokenizer.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) return generated_words @slow def lowercase ( self : Tuple ) -> Optional[int]: self._assert_generated_batch_equal_expected()
53
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers _UpperCamelCase: Any = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
53
1
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a__: Union[str, Any] = 16 a__: Union[str, Any] = 32 def UpperCamelCase__( UpperCamelCase__ : Accelerator , UpperCamelCase__ : int = 16 , UpperCamelCase__ : str = "bert-base-cased" )->Tuple: A__ = AutoTokenizer.from_pretrained(UpperCamelCase__ ) A__ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(UpperCamelCase__ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) A__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A__ = datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=UpperCamelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A__ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(UpperCamelCase__ : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase__ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' ) return tokenizer.pad(UpperCamelCase__ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
A__ = DataLoader( tokenized_datasets['''train'''] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) A__ = DataLoader( tokenized_datasets['''validation'''] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) return train_dataloader, eval_dataloader def UpperCamelCase__( UpperCamelCase__ : List[str] , UpperCamelCase__ : int )->Dict: # Initialize accelerator A__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A__ = config['''lr'''] A__ = int(config['''num_epochs'''] ) A__ = int(config['''seed'''] ) A__ = int(config['''batch_size'''] ) A__ = args.model_name_or_path set_seed(UpperCamelCase__ ) A__ , A__ = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A__ = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__ ) # Instantiate optimizer A__ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A__ = optimizer_cls(params=model.parameters() , lr=UpperCamelCase__ ) if accelerator.state.deepspeed_plugin is not None: A__ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: A__ = 1 A__ = (len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A__ = get_linear_schedule_with_warmup( optimizer=UpperCamelCase__ , num_warmup_steps=0 , num_training_steps=UpperCamelCase__ , ) else: A__ = DummyScheduler(UpperCamelCase__ , total_num_steps=UpperCamelCase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A__ , A__ , A__ , A__ , A__ = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # We need to keep track of how many total steps we have iterated over A__ = 0 # We also need to keep track of the stating epoch so files are named properly A__ = 0 # Now we train the model A__ = evaluate.load('''glue''' , '''mrpc''' ) A__ = 0 A__ = {} for epoch in range(UpperCamelCase__ , UpperCamelCase__ ): model.train() for step, batch in enumerate(UpperCamelCase__ ): A__ = model(**UpperCamelCase__ ) A__ = outputs.loss A__ = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() A__ = 0 for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A__ = model(**UpperCamelCase__ ) A__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times A__ , A__ = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCamelCase__ ) - 1: A__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] A__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCamelCase__ , references=UpperCamelCase__ , ) A__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , UpperCamelCase__ ) A__ = eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: A__ = eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) def UpperCamelCase__( )->Optional[int]: A__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=UpperCamelCase__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCamelCase__ , ) parser.add_argument( '''--output_dir''' , type=UpperCamelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--performance_lower_bound''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , ) parser.add_argument( '''--num_epochs''' , type=UpperCamelCase__ , default=3 , help='''Number of train epochs.''' , ) A__ = parser.parse_args() A__ = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
193
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
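# --- Usage sketch (not part of the original module) ---
# A minimal round trip through the tokenizer defined above; the checkpoint
# name is one of the entries in PRETRAINED_VOCAB_FILES_MAP, but any local
# `spiece.model` path would also work as the `vocab_file` argument.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
encoded = tokenizer("BigBird uses a SentencePiece vocabulary.")
print(encoded["input_ids"])                    # ids wrapped in [CLS] ... [SEP]
print(tokenizer.decode(encoded["input_ids"]))  # decodes back; note no space before [SEP]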
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) __snake_case = parser.parse_args() __snake_case = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) __snake_case = CLIPImageProcessor() __snake_case = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") __snake_case = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
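# --- Usage sketch (not part of the original module) ---
# The four non-trivial "digit-cancelling" fractions, e.g. 49/98 == 4/8 after
# striking the shared digit 9:
print(fraction_list(2))  # ['16/64', '19/95', '26/65', '49/98']
print(solution())        # 100: their product is 1/100 in lowest terms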
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
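# --- Usage sketch (not part of the original module) ---
# Calling the tool loads the Donut checkpoint lazily on first use. The import
# path and the keyword-argument call convention are assumptions based on the
# PipelineTool interface; "invoice.png" is a placeholder path.
from PIL import Image

from transformers.tools import DocumentQuestionAnsweringTool

tool = DocumentQuestionAnsweringTool()
document = Image.open("invoice.png")
print(tool(document=document, question="What is the total amount?"))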
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
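# --- Usage sketch (not part of the original module) ---
# HashMap implements the full MutableMapping protocol, so membership tests
# and .items() come for free from the ABC.
hm: HashMap[str, int] = HashMap()
hm["a"] = 1
hm["b"] = 2
hm["a"] = 10  # overwrites in place, no size change
del hm["b"]   # tombstoned via _deleted rather than removed from the bucket list
print(len(hm), hm["a"], "b" in hm)  # 1 10 False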
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
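# --- Usage sketch (not part of the original module) ---
from itertools import islice

print(list(islice(prime_generator(), 5)))  # [2, 3, 5, 7, 11]
print(solution(10))                        # 17 == 2 + 3 + 5 + 7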
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
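# --- Usage sketch (not part of the original module) ---
# Aligning the template against a dataset's features copies the concrete
# ClassLabel (with its names) into the template's label schema.
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels").align_with_features(features)
print(task.label_schema["labels"].names)  # ['neg', 'pos']
print(task.column_mapping)                # {'text': 'text', 'labels': 'labels'}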
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
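# --- Usage sketch (not part of the original module) ---
# Thanks to _LazyModule, the electra submodules registered above are only
# imported when a symbol is first touched, so `import transformers` stays cheap.
from transformers import ElectraConfig, ElectraForMaskedLM  # resolved lazily

config = ElectraConfig()           # defaults mirror the "small" ELECTRA size
model = ElectraForMaskedLM(config)
print(model.config.hidden_size)    # 256 for the default configuration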
import unittest

from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "é", ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400,
            5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405,
            34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114],
                [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114] + [0] * 63,
                [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114] + [0] * 84,
            ],
            "attention_mask": [
                [1] * 94,
                [1] * 31 + [0] * 63,
                [1] * 10 + [0] * 84,
            ],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class _A ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str=7 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Tuple=18 , __UpperCAmelCase : Dict=30 , __UpperCAmelCase : Any=400 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Tuple=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __UpperCAmelCase : Optional[Any]=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __UpperCAmelCase : Union[str, Any]=True , ): a : int = size if size is not None else {"height": 224, "width": 224} a : List[str] = crop_size if crop_size is not None else {"height": 18, "width": 18} a : List[Any] = parent a : Any = batch_size a : str = num_channels a : Optional[int] = image_size a : Tuple = min_resolution a : str = max_resolution a : Dict = do_resize a : Any = size a : Dict = do_center_crop a : List[str] = crop_size a : str = do_normalize a : Optional[int] = image_mean a : Tuple = image_std a : Any = do_convert_rgb def __snake_case ( self : Union[str, Any]): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def __snake_case ( self : Dict , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Any=False): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: a : str = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta)) else: a : Tuple = [] for i in range(self.batch_size): a , a : List[str] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension a : List[Any] = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1)) for x in image_inputs] if torchify: a : Optional[Any] = [torch.from_numpy(__UpperCAmelCase) for x in image_inputs] return image_inputs @require_torch @require_vision class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Union[str, Any] = ChineseCLIPImageProcessor if is_vision_available() else None def __snake_case ( self : List[str]): a : Dict = ChineseCLIPImageProcessingTester(self , do_center_crop=__UpperCAmelCase) @property def __snake_case ( self : Union[str, Any]): return self.image_processor_tester.prepare_image_processor_dict() def __snake_case ( self : str): a : Optional[int] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__UpperCAmelCase , "do_resize")) self.assertTrue(hasattr(__UpperCAmelCase , "size")) self.assertTrue(hasattr(__UpperCAmelCase , "do_center_crop")) 
self.assertTrue(hasattr(__UpperCAmelCase , "center_crop")) self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize")) self.assertTrue(hasattr(__UpperCAmelCase , "image_mean")) self.assertTrue(hasattr(__UpperCAmelCase , "image_std")) self.assertTrue(hasattr(__UpperCAmelCase , "do_convert_rgb")) def __snake_case ( self : Any): a : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"height": 224, "width": 224}) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18}) a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84}) def __snake_case ( self : str): pass def __snake_case ( self : Tuple): # Initialize image_processing a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images a : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__UpperCAmelCase) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image) # Test not batched input a : str = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a : Dict = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __snake_case ( self : List[Any]): # Initialize image_processing a : str = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors a : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray) # Test not batched input a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a : Union[str, Any] = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __snake_case ( self : List[str]): # Initialize image_processing a : str = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors a : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor) # Test not batched input a : List[str] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a : str = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) @require_torch @require_vision class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def __snake_case ( self : Union[str, Any]): a : str = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__UpperCAmelCase) a : Dict = 3 @property def __snake_case ( self : Optional[Any]): return self.image_processor_tester.prepare_image_processor_dict() def __snake_case ( self : Optional[int]): a : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__UpperCAmelCase , "do_resize")) self.assertTrue(hasattr(__UpperCAmelCase , "size")) self.assertTrue(hasattr(__UpperCAmelCase , "do_center_crop")) self.assertTrue(hasattr(__UpperCAmelCase , "center_crop")) self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize")) self.assertTrue(hasattr(__UpperCAmelCase , "image_mean")) self.assertTrue(hasattr(__UpperCAmelCase , "image_std")) self.assertTrue(hasattr(__UpperCAmelCase , "do_convert_rgb")) def __snake_case ( self : Any): pass def __snake_case ( self : Union[str, Any]): # Initialize image_processing a : List[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images a : str = self.image_processor_tester.prepare_inputs(equal_resolution=__UpperCAmelCase) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image) # Test not batched input a : Tuple = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched a : Optional[Any] = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
"""simple docstring""" import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class __a (unittest.TestCase): '''simple docstring''' def __init__( self , _a , _a = True , _a = None , _a = 32 , _a = True , _a = 1 / 255 , _a = True , _a = True , _a = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _a = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _a = True , _a=7 , _a=30 , _a=400 , _a=3 , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = parent SCREAMING_SNAKE_CASE__ : Tuple = do_resize SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 288} SCREAMING_SNAKE_CASE__ : List[str] = size_divisor SCREAMING_SNAKE_CASE__ : Tuple = do_rescale SCREAMING_SNAKE_CASE__ : List[str] = rescale_factor SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_normalize SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_mean SCREAMING_SNAKE_CASE__ : List[str] = image_std SCREAMING_SNAKE_CASE__ : List[str] = do_pad SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE__ : int = num_channels SCREAMING_SNAKE_CASE__ : Dict = min_resolution SCREAMING_SNAKE_CASE__ : str = max_resolution def _a ( self ) -> List[str]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _a ( self , _a , _a=False ) -> int: """simple docstring""" if not batched: SCREAMING_SNAKE_CASE__ : List[Any] = self.size["""shortest_edge"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_inputs[0] if isinstance(_a , Image.Image ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = image.size else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = image.shape[1], image.shape[2] SCREAMING_SNAKE_CASE__ : Tuple = size / min(_a , _a ) if h < w: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = size, scale * w else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((1_333 / 800) * size ) if max(_a , _a ) > max_size: SCREAMING_SNAKE_CASE__ : List[str] = max_size / max(_a , _a ) SCREAMING_SNAKE_CASE__ : Any = newh * scale SCREAMING_SNAKE_CASE__ : Any = neww * scale SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = int(newh + 0.5 ), int(neww + 0.5 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: SCREAMING_SNAKE_CASE__ : Dict = [] for image in image_inputs: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) SCREAMING_SNAKE_CASE__ : Any = max(_a , key=lambda _a : item[0] )[0] SCREAMING_SNAKE_CASE__ : Any = max(_a , key=lambda _a : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a (UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :Optional[int] = 
BridgeTowerImageProcessor if is_vision_available() else None def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = BridgeTowerImageProcessingTester(self ) @property def _a ( self ) -> Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , """image_mean""" ) ) self.assertTrue(hasattr(_a , """image_std""" ) ) self.assertTrue(hasattr(_a , """do_normalize""" ) ) self.assertTrue(hasattr(_a , """do_resize""" ) ) self.assertTrue(hasattr(_a , """size""" ) ) self.assertTrue(hasattr(_a , """size_divisor""" ) ) def _a ( self ) -> List[str]: """simple docstring""" pass def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE__ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ : Tuple = image_processing(_a , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(image_inputs[0] , 
return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(_a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched SCREAMING_SNAKE_CASE__ : Dict = image_processing(_a , return_tensors="""pt""" ).pixel_values SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.image_processor_tester.get_expected_values(_a , batched=_a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { "configuration_clipseg": [ "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPSegConfig", "CLIPSegTextConfig", "CLIPSegVisionConfig", ], "processing_clipseg": ["CLIPSegProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPSegModel", "CLIPSegPreTrainedModel", "CLIPSegTextModel", "CLIPSegVisionModel", "CLIPSegForImageSegmentation", ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any] ) -> Union[str, Any]: return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def __lowerCamelCase ( a_ : Optional[int] , a_ : Any=0 ) -> Optional[Any]: return sorted(a_ , key=lambda a_ : x[column] ) def __lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int] , a_ : str=float('''inf''' ) ) -> str: for i in range(points_counts - 1 ): for j in range(i + 1 , a_ ): __SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __SCREAMING_SNAKE_CASE :Optional[Any] = current_dis return min_dis def __lowerCamelCase ( a_ : List[Any] , a_ : Any , a_ : Optional[int]=float('''inf''' ) ) -> Optional[Any]: for i in range(min(6 , points_counts - 1 ) , a_ ): for j in range(max(0 , i - 6 ) , a_ ): __SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __SCREAMING_SNAKE_CASE :int = current_dis return min_dis def __lowerCamelCase ( a_ : str , a_ : List[Any] , a_ : int ) -> Optional[int]: # base case if points_counts <= 3: return dis_between_closest_pair(a_ , a_ ) # recursion __SCREAMING_SNAKE_CASE :int = points_counts // 2 __SCREAMING_SNAKE_CASE :Dict = closest_pair_of_points_sqr( a_ , points_sorted_on_y[:mid] , a_ ) __SCREAMING_SNAKE_CASE :Any = closest_pair_of_points_sqr( a_ , points_sorted_on_y[mid:] , points_counts - mid ) __SCREAMING_SNAKE_CASE :Union[str, Any] = min(a_ , a_ ) __SCREAMING_SNAKE_CASE :str = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(a_ ) __SCREAMING_SNAKE_CASE :Dict = dis_between_closest_in_strip( a_ , len(a_ ) , a_ ) return min(a_ , a_ ) def __lowerCamelCase ( a_ : int , a_ : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE :Union[str, Any] = column_based_sort(a_ , column=0 ) __SCREAMING_SNAKE_CASE :int = column_based_sort(a_ , column=1 ) return ( closest_pair_of_points_sqr( a_ , a_ , a_ ) ) ** 0.5 if __name__ == "__main__": lowerCamelCase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)] print("Distance:", closest_pair_of_points(points, len(points)))
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit

    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count


if __name__ == "__main__":
    print(f"{solution() = }")
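# --- Usage sketch (not part of the original module) ---
# With x, y, z = a + d, a, a - d in arithmetic progression, the quantity
# x**2 - y**2 - z**2 simplifies to a * (4*d - a), which is what the sieve
# above enumerates. The example from the Project Euler 135 statement:
x, y, z = 12, 9, 6
a, d = 9, 3
assert x**2 - y**2 - z**2 == 27 == a * (4 * d - a)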
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark.run()


if __name__ == "__main__":
    main()
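# --- Usage sketch (not part of the original script) ---
# Example invocation; the script filename is illustrative and the flags are
# assumed to come from TensorFlowBenchmarkArguments:
#   python run_benchmark_tf.py --models bert-base-uncased \
#       --batch_sizes 8 --sequence_lengths 128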
import inspect
import unittest

from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_attention_outputs = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
# verify the logits snake_case = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) snake_case = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase(self : Optional[Any] ) -> Optional[int]: snake_case = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) snake_case = model.to(__SCREAMING_SNAKE_CASE ) snake_case = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) snake_case = prepare_img() snake_case = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): snake_case = model(**__SCREAMING_SNAKE_CASE ) snake_case = outputs.logits # verify the logits snake_case = torch.Size((1, 2_1, 3_2, 3_2) ) self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE ) snake_case = torch.tensor( [ [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]], [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]], [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]], ] , device=__SCREAMING_SNAKE_CASE , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase(self : str ) -> Dict: snake_case = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) snake_case = model.to(__SCREAMING_SNAKE_CASE ) snake_case = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) snake_case = prepare_img() snake_case = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): snake_case = model(**__SCREAMING_SNAKE_CASE ) snake_case = outputs.logits.detach().cpu() snake_case = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(5_0, 6_0)] ) snake_case = torch.Size((5_0, 6_0) ) self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE ) snake_case = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE ) snake_case = torch.Size((3_2, 3_2) ) self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
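# A minimal, self-contained usage sketch for the model family exercised by the
# tests above. It is not part of the test suite; it assumes the public
# transformers names MobileViTImageProcessor / MobileViTV2ForImageClassification
# and the same checkpoint the slow tests load.
import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test above
print(model.config.id2label[logits.argmax(-1).item()])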
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Initialize to None so the fallback below cannot hit an unbound name
        # when the deprecated kwarg is absent.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
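# A short sketch of driving the processor above end to end, assuming the public
# checkpoint "microsoft/layoutxlm-base" and an OCR backend (the default
# LayoutLMv2ImageProcessor applies Tesseract OCR, so pytesseract must be
# installed). "document.png" is a placeholder path.
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
document = Image.open("document.png").convert("RGB")

# With apply_ocr left at its default (True), words and boxes come from OCR,
# so only the image is passed:
encoding = processor(document, return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, image, ...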
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case_ = logging.getLogger(__name__) @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : str A_ : str A_ : Optional[str] = None A_ : Optional[str] = None A_ : Optional[str] = None @dataclass(frozen=_UpperCAmelCase ) class SCREAMING_SNAKE_CASE__ : A_ : List[int] A_ : Optional[List[int]] = None A_ : Optional[List[int]] = None A_ : Optional[Union[int, float]] = None A_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : List[InputFeatures] def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = os.path.join( a__ , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , ) __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __snake_case = cached_features_file + '''.lock''' with FileLock(a__ ): if os.path.exists(a__ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) __snake_case = torch.load(a__ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) __snake_case = ( processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) ) logger.info('''Training examples: %s''' , len(a__ ) ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) logger.info('''Saving features into cached file %s''' , a__ ) torch.save(self.features , a__ ) def __len__(self : int ): """simple docstring""" return len(self.features ) def __getitem__(self : Dict , a__ : List[Any] ): """simple docstring""" return self.features[i] def a (self : List[Any] ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE__ : A_ : List[InputFeatures] def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ): """simple docstring""" __snake_case = hans_processors[task]() __snake_case = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __snake_case , __snake_case = label_list[2], label_list[1] __snake_case = label_list __snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ ) __snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing 
example %d of %d''' % (ex_index, len(a__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __snake_case = tf.data.Dataset.from_generator( a__ , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def a (self : Union[str, Any] ): """simple docstring""" return self.dataset def __len__(self : Dict ): """simple docstring""" return len(self.features ) def __getitem__(self : Any , a__ : Dict ): """simple docstring""" return self.features[i] def a (self : str ): """simple docstring""" return self.label_list class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): def a (self : Dict , a__ : Dict ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' ) def a (self : Optional[int] , a__ : Tuple ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def a (self : int ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def a (self : Any , a__ : Optional[int] , a__ : List[Any] ): """simple docstring""" __snake_case = [] for i, line in enumerate(a__ ): if i == 0: continue __snake_case = '''%s-%s''' % (set_type, line[0]) __snake_case = line[5] __snake_case = line[6] __snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7] __snake_case = line[0] examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) ) return examples def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]: __snake_case = {label: i for i, label in enumerate(snake_case_ )} __snake_case = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ): if ex_index % 1_0000 == 0: logger.info('''Writing example %d''' % (ex_index) ) __snake_case = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) __snake_case = label_map[example.label] if example.label in label_map else 0 __snake_case = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features snake_case_ = { 'hans': 3, } snake_case_ = { 'hans': HansProcessor, }
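# Side note, not part of the file above: the same HANS corpus is also hosted on
# the Hugging Face Hub, which is often the quicker way to inspect a few
# examples than parsing the raw heuristics_*.txt files. Split and field names
# here are assumptions taken from the Hub dataset card, not from this processor.
from datasets import load_dataset

hans = load_dataset("hans", split="validation")
example = hans[0]
print(example["premise"], "=>", example["hypothesis"], "| label:", example["label"])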
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
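# A minimal sketch of consuming this configuration, assuming the public
# transformers names ViTMSNConfig / ViTMSNModel and the checkpoint referenced
# in the archive map above.
import torch
from transformers import ViTMSNConfig, ViTMSNModel

# Randomly initialized model from a default config ...
config = ViTMSNConfig()
model = ViTMSNModel(config)

# ... or the pretrained weights the archive map points at.
model = ViTMSNModel.from_pretrained("sayakpaul/vit-msn-base")
pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
with torch.no_grad():
    outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # (1, num_patches + 1, hidden_size)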
'''simple docstring''' import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa __lowerCamelCase = logging.getLogger(__name__) class A__ ( _snake_case ): lowercase = "summarization" lowercase = ["loss"] lowercase = ROUGE_KEYS lowercase = "rouge2" def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict: '''simple docstring''' if hparams.sortish_sampler and hparams.gpus > 1: A_ = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" ) if hparams.sortish_sampler: raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" ) super().__init__(UpperCamelCase__ , num_labels=UpperCamelCase__ , mode=self.mode , **UpperCamelCase__ ) use_task_specific_params(self.model , """summarization""" ) save_git_info(self.hparams.output_dir ) A_ = Path(self.output_dir ) / """metrics.json""" A_ = Path(self.output_dir ) / """hparams.pkl""" pickle_save(self.hparams , self.hparams_save_path ) A_ = 0 A_ = defaultdict(UpperCamelCase__ ) A_ = self.config.model_type A_ = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size A_ = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } A_ = { """train""": self.hparams.n_train, """val""": self.hparams.n_val, """test""": self.hparams.n_test, } A_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} A_ = { """train""": self.hparams.max_target_length, """val""": self.hparams.val_max_target_length, """test""": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) A_ = get_git_info()["""repo_sha"""] A_ = hparams.num_workers A_ = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase__ ): A_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang] A_ = self.decoder_start_token_id A_ = ( SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset ) A_ = False A_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: A_ = 
self.hparams.eval_max_gen_length else: A_ = self.model.config.max_length A_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def snake_case_ ( self , UpperCamelCase__ ) -> Dict[str, List[str]]: '''simple docstring''' A_ = { k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items() } save_json(UpperCamelCase__ , Path(self.output_dir ) / """text_batch.json""" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" ) A_ = True return readable_batch def snake_case_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> str: '''simple docstring''' return self.model(UpperCamelCase__ , **UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ ) -> List[str]: '''simple docstring''' A_ = self.tokenizer.batch_decode( UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ ) return lmap(str.strip , UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ ) -> Tuple: '''simple docstring''' A_ = self.tokenizer.pad_token_id A_ , A_ = batch["""input_ids"""], batch["""attention_mask"""] A_ = batch["""labels"""] if isinstance(self.model , UpperCamelCase__ ): A_ = self.model._shift_right(UpperCamelCase__ ) else: A_ = shift_tokens_right(UpperCamelCase__ , UpperCamelCase__ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero A_ = decoder_input_ids self.save_readable_batch(UpperCamelCase__ ) A_ = self(UpperCamelCase__ , attention_mask=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , use_cache=UpperCamelCase__ ) A_ = outputs["""logits"""] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id A_ = nn.CrossEntropyLoss(ignore_index=UpperCamelCase__ ) assert lm_logits.shape[-1] == self.vocab_size A_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: A_ = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 ) A_ , A_ = label_smoothed_nll_loss( UpperCamelCase__ , UpperCamelCase__ , self.hparams.label_smoothing , ignore_index=UpperCamelCase__ ) return (loss,) @property def snake_case_ ( self ) -> int: '''simple docstring''' return self.tokenizer.pad_token_id def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: '''simple docstring''' A_ = self._step(UpperCamelCase__ ) A_ = dict(zip(self.loss_names , UpperCamelCase__ ) ) # tokens per batch A_ = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum() A_ = batch["""input_ids"""].shape[0] A_ = batch["""input_ids"""].eq(self.pad ).sum() A_ = batch["""input_ids"""].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: '''simple docstring''' return self._generative_step(UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__="val" ) -> Dict: '''simple docstring''' self.step_count += 1 A_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} A_ = losses["""loss"""] A_ = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""] } A_ = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) A_ = torch.tensor(UpperCamelCase__ ).type_as(UpperCamelCase__ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) 
losses.update(UpperCamelCase__ ) A_ = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()} A_ = self.step_count self.metrics[prefix].append(UpperCamelCase__ ) # callback writes this to self.metrics_save_path A_ = flatten_list([x["""preds"""] for x in outputs] ) return { "log": all_metrics, "preds": preds, f'''{prefix}_loss''': loss, f'''{prefix}_{self.val_metric}''': metric_tensor, } def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: '''simple docstring''' return calculate_rouge(UpperCamelCase__ , UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ ) -> dict: '''simple docstring''' A_ = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') A_ = self.model.generate( batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) A_ = (time.time() - ta) / batch["""input_ids"""].shape[0] A_ = self.ids_to_clean_text(UpperCamelCase__ ) A_ = self.ids_to_clean_text(batch["""labels"""] ) A_ = self._step(UpperCamelCase__ ) A_ = dict(zip(self.loss_names , UpperCamelCase__ ) ) A_ = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__ ) A_ = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__ ) ) base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__ ) return base_metrics def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: '''simple docstring''' return self._generative_step(UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ ) -> Dict: '''simple docstring''' return self.validation_epoch_end(UpperCamelCase__ , prefix="""test""" ) def snake_case_ ( self , UpperCamelCase__ ) -> SeqaSeqDataset: '''simple docstring''' A_ = self.n_obs[type_path] A_ = self.target_lens[type_path] A_ = self.dataset_class( self.tokenizer , type_path=UpperCamelCase__ , n_obs=UpperCamelCase__ , max_target_length=UpperCamelCase__ , **self.dataset_kwargs , ) return dataset def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader: '''simple docstring''' A_ = self.get_dataset(UpperCamelCase__ ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": A_ = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1 ) return DataLoader( UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": A_ = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , ) def snake_case_ ( self ) -> DataLoader: '''simple docstring''' A_ = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__ ) return dataloader def snake_case_ ( self ) -> DataLoader: '''simple docstring''' return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size ) def 
snake_case_ ( self ) -> DataLoader: '''simple docstring''' return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size ) @staticmethod def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ ) add_generic_args(UpperCamelCase__ , UpperCamelCase__ ) parser.add_argument( """--max_source_length""" , default=1024 , type=UpperCamelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--max_target_length""" , default=56 , type=UpperCamelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--val_max_target_length""" , default=142 , type=UpperCamelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--test_max_target_length""" , default=142 , type=UpperCamelCase__ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument("""--freeze_encoder""" , action="""store_true""" ) parser.add_argument("""--freeze_embeds""" , action="""store_true""" ) parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=UpperCamelCase__ ) parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=UpperCamelCase__ ) parser.add_argument("""--max_tokens_per_batch""" , type=UpperCamelCase__ , default=UpperCamelCase__ ) parser.add_argument("""--logger_name""" , type=UpperCamelCase__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" ) parser.add_argument("""--n_train""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_val""" , type=UpperCamelCase__ , default=500 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_test""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" ) parser.add_argument( """--task""" , type=UpperCamelCase__ , default="""summarization""" , required=UpperCamelCase__ , help="""# examples. 
-1 means use all.""" ) parser.add_argument("""--label_smoothing""" , type=UpperCamelCase__ , default=0.0 , required=UpperCamelCase__ ) parser.add_argument("""--src_lang""" , type=UpperCamelCase__ , default="""""" , required=UpperCamelCase__ ) parser.add_argument("""--tgt_lang""" , type=UpperCamelCase__ , default="""""" , required=UpperCamelCase__ ) parser.add_argument("""--eval_beams""" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ ) parser.add_argument( """--val_metric""" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , choices=["""bleu""", """rouge2""", """loss""", None] ) parser.add_argument("""--eval_max_gen_length""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""never generate more than n tokens""" ) parser.add_argument("""--save_top_k""" , type=UpperCamelCase__ , default=1 , required=UpperCamelCase__ , help="""How many checkpoints to save""" ) parser.add_argument( """--early_stopping_patience""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help=( """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So""" """ val_check_interval will effect it.""" ) , ) return parser class A__ ( _snake_case ): lowercase = "translation" lowercase = ["loss"] lowercase = ["bleu"] lowercase = "bleu" def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' super().__init__(UpperCamelCase__ , **UpperCamelCase__ ) A_ = hparams.src_lang A_ = hparams.tgt_lang def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> dict: '''simple docstring''' return calculate_bleu(UpperCamelCase__ , UpperCamelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=None ) -> SummarizationModule: Path(args.output_dir ).mkdir(exist_ok=UpperCAmelCase__ ) check_output_dir(UpperCAmelCase__, expected_items=3 ) if model is None: if "summarization" in args.task: A_ = SummarizationModule(UpperCAmelCase__ ) else: A_ = TranslationModule(UpperCAmelCase__ ) A_ = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("""/tmp""" ) or str(args.output_dir ).startswith("""/var""" ) ): A_ = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger A_ = os.environ.get("""WANDB_PROJECT""", UpperCAmelCase__ ) A_ = WandbLogger(name=model.output_dir.name, project=UpperCAmelCase__ ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger A_ = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: A_ = get_early_stopping_callback(model.val_metric, args.early_stopping_patience ) else: A_ = False A_ = args.val_metric == """loss""" A_ = generic_train( UpperCAmelCase__, UpperCAmelCase__, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback( args.output_dir, model.val_metric, args.save_top_k, UpperCAmelCase__ ), early_stopping_callback=UpperCAmelCase__, logger=UpperCAmelCase__, ) pickle_save(model.hparams, model.output_dir / """hparams.pkl""" ) if not args.do_predict: return model A_ = """""" A_ = sorted(glob.glob(os.path.join(args.output_dir, """*.ckpt""" ), recursive=UpperCAmelCase__ ) ) if checkpoints: A_ = checkpoints[-1] A_ = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if 
__name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() __lowerCamelCase = pl.Trainer.add_argparse_args(parser) __lowerCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd()) __lowerCamelCase = parser.parse_args() main(args)
"""simple docstring""" import math import sys import cva import numpy as np def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.ndarray: # For applying gaussian function for each element in matrix. __lowerCAmelCase: Union[str, Any] = math.sqrt(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase: List[str] = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.ndarray: __lowerCAmelCase: Any = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.ndarray: # Creates a gaussian kernel of given dimension. __lowerCAmelCase: List[str] = np.zeros((kernel_size, kernel_size) ) for i in range(0 , __SCREAMING_SNAKE_CASE ): for j in range(0 , __SCREAMING_SNAKE_CASE ): __lowerCAmelCase: str = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> np.ndarray: __lowerCAmelCase: Dict = np.zeros(img.shape ) __lowerCAmelCase: Union[str, Any] = get_gauss_kernel(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase: Any = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): __lowerCAmelCase: Tuple = get_slice(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Dict = img_s - img_s[kernel_size // 2, kernel_size // 2] __lowerCAmelCase: List[Any] = vec_gaussian(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __lowerCAmelCase: int = np.multiply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __lowerCAmelCase: int = np.multiply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Tuple = np.sum(__SCREAMING_SNAKE_CASE ) / np.sum(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase: str = val return imga def a__ ( __SCREAMING_SNAKE_CASE ) -> tuple: __lowerCAmelCase: List[Any] = args[1] if args[1:] else "../image_data/lena.jpg" __lowerCAmelCase: List[Any] = float(args[2] ) if args[2:] else 1.0 __lowerCAmelCase: List[Any] = float(args[3] ) if args[3:] else 1.0 if args[4:]: __lowerCAmelCase: int = int(args[4] ) __lowerCAmelCase: str = kernel_size + abs(kernel_size % 2 - 1 ) else: __lowerCAmelCase: str = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": __A , __A , __A , __A = parse_args(sys.argv) __A = cva.imread(filename, 0) cva.imshow("input image", img) __A = img / 255 __A = out.astype("float32") __A = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) __A = out * 255 __A = np.uinta(out) cva.imshow("output image", out) cva.waitKey(0) cva.destroyAllWindows()
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class snake_case : def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=0.2 , UpperCamelCase__ : Any=0.2)-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Optional[Any] = bp_numa __lowerCAmelCase: Optional[int] = bp_numa __lowerCAmelCase: Tuple = bp_numa __lowerCAmelCase: Optional[int] = conva_get[:2] __lowerCAmelCase: int = conva_get[2] __lowerCAmelCase: List[str] = size_pa __lowerCAmelCase: Tuple = rate_w __lowerCAmelCase: Dict = rate_t __lowerCAmelCase: List[Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5) for i in range(self.conva[1]) ] __lowerCAmelCase: Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) __lowerCAmelCase: int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5) __lowerCAmelCase: Optional[Any] = -2 * np.random.rand(self.conva[1]) + 1 __lowerCAmelCase: int = -2 * np.random.rand(self.num_bpa) + 1 __lowerCAmelCase: str = -2 * np.random.rand(self.num_bpa) + 1 def lowercase_ ( self : Optional[int] , UpperCamelCase__ : int)-> List[str]: '''simple docstring''' __lowerCAmelCase: Any = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(UpperCamelCase__ , "wb") as f: pickle.dump(UpperCamelCase__ , UpperCamelCase__) print(f"Model saved: {save_path}") @classmethod def lowercase_ ( cls : Dict , UpperCamelCase__ : Union[str, Any])-> List[Any]: '''simple docstring''' with open(UpperCamelCase__ , "rb") as f: __lowerCAmelCase: Dict = pickle.load(UpperCamelCase__) # noqa: S301 __lowerCAmelCase: Optional[int] = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) __lowerCAmelCase: List[str] = model_dic.get("size_pooling1") __lowerCAmelCase: Union[str, Any] = model_dic.get("num_bp1") __lowerCAmelCase: Any = model_dic.get("num_bp2") __lowerCAmelCase: Union[str, Any] = model_dic.get("num_bp3") __lowerCAmelCase: Optional[int] = model_dic.get("rate_weight") __lowerCAmelCase: int = model_dic.get("rate_thre") # create model instance __lowerCAmelCase: Tuple = CNN(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) # modify model parameter __lowerCAmelCase: Any = model_dic.get("w_conv1") __lowerCAmelCase: Optional[Any] = model_dic.get("wkj") __lowerCAmelCase: Any = model_dic.get("vji") __lowerCAmelCase: Dict = model_dic.get("thre_conv1") __lowerCAmelCase: int = model_dic.get("thre_bp2") __lowerCAmelCase: Optional[int] = model_dic.get("thre_bp3") return conv_ins def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any])-> List[Any]: '''simple docstring''' return 1 / (1 + np.exp(-1 * x)) def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any])-> Optional[Any]: '''simple docstring''' return round(UpperCamelCase__ , 3) def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int)-> Dict: '''simple docstring''' __lowerCAmelCase: 
List[Any] = convs[0] __lowerCAmelCase: int = convs[1] __lowerCAmelCase: Union[str, Any] = np.shape(UpperCamelCase__)[0] # get the data slice of original image data, data_focus __lowerCAmelCase: Optional[Any] = [] for i_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__): for j_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__): __lowerCAmelCase: Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(UpperCamelCase__) # calculate the feature map of every single kernel, and saved as list of matrix __lowerCAmelCase: int = [] __lowerCAmelCase: Optional[int] = int((size_data - size_conv) / conv_step + 1) for i_map in range(UpperCamelCase__): __lowerCAmelCase: List[str] = [] for i_focus in range(len(UpperCamelCase__)): __lowerCAmelCase: Union[str, Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map])) - thre_convs[i_map] ) featuremap.append(self.sig(UpperCamelCase__)) __lowerCAmelCase: str = np.asmatrix(UpperCamelCase__).reshape( UpperCamelCase__ , UpperCamelCase__) data_featuremap.append(UpperCamelCase__) # expanding the data slice to One dimenssion __lowerCAmelCase: Optional[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(UpperCamelCase__)) __lowerCAmelCase: List[Any] = np.asarray(UpperCamelCase__) return focus_list, data_featuremap def lowercase_ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]="average_pool")-> str: '''simple docstring''' __lowerCAmelCase: Tuple = len(featuremaps[0]) __lowerCAmelCase: List[Any] = int(size_map / size_pooling) __lowerCAmelCase: int = [] for i_map in range(len(UpperCamelCase__)): __lowerCAmelCase: str = featuremaps[i_map] __lowerCAmelCase: List[Any] = [] for i_focus in range(0 , UpperCamelCase__ , UpperCamelCase__): for j_focus in range(0 , UpperCamelCase__ , UpperCamelCase__): __lowerCAmelCase: Any = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(UpperCamelCase__)) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(UpperCamelCase__)) __lowerCAmelCase: Optional[int] = np.asmatrix(UpperCamelCase__).reshape(UpperCamelCase__ , UpperCamelCase__) featuremap_pooled.append(UpperCamelCase__) return featuremap_pooled def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str)-> int: '''simple docstring''' __lowerCAmelCase: List[Any] = [] for i in range(len(UpperCamelCase__)): __lowerCAmelCase: Union[str, Any] = np.shape(data[i]) __lowerCAmelCase: int = data[i].reshape(1 , shapes[0] * shapes[1]) __lowerCAmelCase: Dict = data_listed.getA().tolist()[0] data_expanded.extend(UpperCamelCase__) __lowerCAmelCase: Any = np.asarray(UpperCamelCase__) return data_expanded def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any])-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Dict = np.asarray(UpperCamelCase__) __lowerCAmelCase: Optional[int] = np.shape(UpperCamelCase__) __lowerCAmelCase: Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1]) return data_expanded def lowercase_ ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict)-> List[Any]: '''simple docstring''' __lowerCAmelCase: Optional[int] = [] __lowerCAmelCase: Any = 0 for i_map in range(UpperCamelCase__): __lowerCAmelCase: Optional[Any] = 
np.ones((size_map, size_map)) for i in range(0 , UpperCamelCase__ , UpperCamelCase__): for j in range(0 , UpperCamelCase__ , UpperCamelCase__): __lowerCAmelCase: Optional[Any] = pd_pool[ i_pool ] __lowerCAmelCase: str = i_pool + 1 __lowerCAmelCase: Dict = np.multiply( UpperCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]))) pd_all.append(UpperCamelCase__) return pd_all def lowercase_ ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str=bool)-> List[str]: '''simple docstring''' print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(UpperCamelCase__))) print((" - - Shape: Teach_Data ", np.shape(UpperCamelCase__))) __lowerCAmelCase: str = 0 __lowerCAmelCase: Optional[int] = [] __lowerCAmelCase: List[Any] = 1_0_0_0_0 while rp < n_repeat and mse >= error_accuracy: __lowerCAmelCase: Optional[Any] = 0 print(f"-------------Learning Time {rp}--------------") for p in range(len(UpperCamelCase__)): # print('------------Learning Image: %d--------------'%p) __lowerCAmelCase: Dict = np.asmatrix(datas_train[p]) __lowerCAmelCase: Dict = np.asarray(datas_teach[p]) __lowerCAmelCase , __lowerCAmelCase: int = self.convolute( UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __lowerCAmelCase: Any = self.pooling(UpperCamelCase__ , self.size_poolinga) __lowerCAmelCase: Optional[Any] = np.shape(UpperCamelCase__) __lowerCAmelCase: str = self._expand(UpperCamelCase__) __lowerCAmelCase: str = data_bp_input __lowerCAmelCase: int = np.dot(UpperCamelCase__ , self.vji.T) - self.thre_bpa __lowerCAmelCase: int = self.sig(UpperCamelCase__) __lowerCAmelCase: Optional[Any] = np.dot(UpperCamelCase__ , self.wkj.T) - self.thre_bpa __lowerCAmelCase: str = self.sig(UpperCamelCase__) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- __lowerCAmelCase: Union[str, Any] = np.multiply( (data_teach - bp_outa) , np.multiply(UpperCamelCase__ , (1 - bp_outa))) __lowerCAmelCase: Any = np.multiply( np.dot(UpperCamelCase__ , self.wkj) , np.multiply(UpperCamelCase__ , (1 - bp_outa))) __lowerCAmelCase: str = np.dot(UpperCamelCase__ , self.vji) __lowerCAmelCase: Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga) __lowerCAmelCase: str = pd_conva_pooled.T.getA().tolist() __lowerCAmelCase: str = self._calculate_gradient_from_pool( UpperCamelCase__ , UpperCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1]): __lowerCAmelCase: List[Any] = self._expand_mat(pd_conva_all[k_conv]) __lowerCAmelCase: int = self.rate_weight * np.dot(UpperCamelCase__ , UpperCamelCase__) __lowerCAmelCase: Tuple = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0])) __lowerCAmelCase: Tuple = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv]) * self.rate_thre ) # all connected layer __lowerCAmelCase: List[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight __lowerCAmelCase: Union[str, Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight __lowerCAmelCase: Tuple = self.thre_bpa - pd_k_all * self.rate_thre __lowerCAmelCase: Optional[int] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image __lowerCAmelCase: List[str] = np.sum(abs(data_teach - bp_outa)) error_count 
+= errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) __lowerCAmelCase: Tuple = rp + 1 __lowerCAmelCase: Optional[Any] = error_count / patterns all_mse.append(UpperCamelCase__) def draw_error(): __lowerCAmelCase: Dict = [error_accuracy for i in range(int(n_repeat * 1.2))] plt.plot(UpperCamelCase__ , "+-") plt.plot(UpperCamelCase__ , "r--") plt.xlabel("Learning Times") plt.ylabel("All_mse") plt.grid(UpperCamelCase__ , alpha=0.5) plt.show() print("------------------Training Complished---------------------") print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}")) if draw_e: draw_error() return mse def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Tuple)-> List[str]: '''simple docstring''' __lowerCAmelCase: int = [] print("-------------------Start Testing-------------------------") print((" - - Shape: Test_Data ", np.shape(UpperCamelCase__))) for p in range(len(UpperCamelCase__)): __lowerCAmelCase: Dict = np.asmatrix(datas_test[p]) __lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.convolute( UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __lowerCAmelCase: Tuple = self.pooling(UpperCamelCase__ , self.size_poolinga) __lowerCAmelCase: List[str] = self._expand(UpperCamelCase__) __lowerCAmelCase: int = data_bp_input __lowerCAmelCase: List[Any] = bp_outa * self.vji.T - self.thre_bpa __lowerCAmelCase: Any = self.sig(UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = bp_outa * self.wkj.T - self.thre_bpa __lowerCAmelCase: List[str] = self.sig(UpperCamelCase__) produce_out.extend(bp_outa.getA().tolist()) __lowerCAmelCase: Tuple = [list(map(self.do_round , UpperCamelCase__)) for each in produce_out] return np.asarray(UpperCamelCase__) def lowercase_ ( self : int , UpperCamelCase__ : Any)-> Any: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = np.asmatrix(UpperCamelCase__) __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.convolute( UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) __lowerCAmelCase: Any = self.pooling(UpperCamelCase__ , self.size_poolinga) return data_conveda, data_pooleda if __name__ == "__main__": pass
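# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original file): train the
# CNN above on tiny random data. All shapes here are assumptions chosen so the
# flatten layer matches: a 10x10 input with a 3x3 kernel (step 1) gives an
# 8x8 feature map; 2x2 average pooling gives 4x4, so 4 * 4 * 2 kernels = 32
# flatten units.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    np.random.seed(0)
    cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=32, bp_num2=20, bp_num3=2)
    images = [np.random.rand(10, 10) for _ in range(4)]
    labels = [np.asarray([1.0, 0.0]) for _ in range(4)]
    cnn.train(len(images), images, labels, n_repeat=5, error_accuracy=0.1)
    print(cnn.predict(images))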
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase__ = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase__ = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase__ = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase__ = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase__ = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Tuple ): return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=[1, 10, 100] , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Union[str, Any]=3.0 ): if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=__lowerCAmelCase ) as executor: _UpperCAmelCase = [] _UpperCAmelCase = Counter() _UpperCAmelCase = 0 _UpperCAmelCase = defaultdict(__lowerCAmelCase ) for task_id, (candidates, test_case) in enumerate(zip(__lowerCAmelCase , __lowerCAmelCase ) ): for candidate in candidates: _UpperCAmelCase = candidate + """\n""" + test_case _UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id]) _UpperCAmelCase = executor.submit(__lowerCAmelCase , *__lowerCAmelCase ) futures.append(__lowerCAmelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__lowerCAmelCase ): _UpperCAmelCase = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) _UpperCAmelCase , _UpperCAmelCase = [], [] for result in results.values(): result.sort() _UpperCAmelCase = [r[1]["""passed"""] for r in result] total.append(len(__lowerCAmelCase ) ) correct.append(sum(__lowerCAmelCase ) ) _UpperCAmelCase = np.array(__lowerCAmelCase ) _UpperCAmelCase = np.array(__lowerCAmelCase 
) _UpperCAmelCase = k _UpperCAmelCase = {f'''pass@{k}''': estimate_pass_at_k(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" def estimator(lowercase ,lowercase ,lowercase ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) ) if isinstance(lowercase ,lowercase ): _UpperCAmelCase = itertools.repeat(lowercase ,len(lowercase ) ) else: assert len(lowercase ) == len(lowercase ) _UpperCAmelCase = iter(lowercase ) return np.array([estimator(int(lowercase ) ,int(lowercase ) ,lowercase ) for n, c in zip(lowercase ,lowercase )] )
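# Usage sketch (editor's addition): `estimate_pass_at_k` can also be used on
# its own. With 10 samples drawn per problem and made-up correct counts:
#
#   estimate_pass_at_k(np.array([10, 10, 10]), np.array([3, 0, 10]), k=1)
#   # -> array([0.3, 0. , 1. ])
#
# The unbiased estimator avoids simply taking c / n, which would be biased for
# small n; e.g. with n=10, c=3, k=1 it returns 1 - (7/8)(8/9)(9/10) = 0.3.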
"""simple docstring""" import mpmath # for roots of unity import numpy as np class a : def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ): # Input as list _UpperCAmelCase = list(poly_a or [0] )[:] _UpperCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _UpperCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _UpperCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _UpperCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _UpperCAmelCase = self.__multiply() def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(__lowerCAmelCase ) <= 1: return dft[0] # _UpperCAmelCase = self.c_max_length // 2 while next_ncol > 0: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root**next_ncol # First half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _UpperCAmelCase = new_dft _UpperCAmelCase = next_ncol // 2 return dft[0] def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.__dft("""A""" ) _UpperCAmelCase = self.__dft("""B""" ) _UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _UpperCAmelCase = 2 while next_ncol <= self.c_max_length: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root ** (next_ncol // 2) _UpperCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _UpperCAmelCase = new_inverse_c next_ncol *= 2 # Unpack _UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Dict ): _UpperCAmelCase = """A = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _UpperCAmelCase = """B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _UpperCAmelCase = """A*B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
import argparse

import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of the vision encoder's
            # attention layers require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of the text encoder's
            # attention layers require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """
    Copy/paste/tweak model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
    )

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
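# Usage sketch (editor's addition): a typical invocation of this conversion
# script from the command line. The script file name and the checkpoint path
# are placeholders, not values taken from the original file.
#
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path /path/to/groupvit_gcc_yfcc_checkpoint.pth \
#       --model_name groupvit-gcc-yfcc \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc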
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid whose rectangle count is closest to `target`,
    using the fact that an a x b grid contains T(a) * T(b) rectangles, where
    T(n) is the n-th triangle number.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
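# Worked example (editor's addition): a 1x6 grid contains T(1) * T(6) =
# 1 * 21 = 21 rectangles, the closest count to a target of 20, so the best
# area for that target is 1 * 6 = 6.
if __name__ == "__main__":
    assert solution(20) == 6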
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """
    Prints the Shannon entropy of the single-character and two-character
    distributions of `text`, followed by the difference between them.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text input into two dicts of counts.
    The first dictionary stores the frequency of single-character strings.
    The second dictionary stores the frequency of two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have a space at the start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
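# Usage sketch (editor's addition): entropies of a short all-lowercase sample
# text (the functions above only count spaces and lowercase ascii letters).
if __name__ == "__main__":
    calculate_prob("the quick brown fox jumps over the lazy dog")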
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launches a training function, using several processes if it's possible in
    the current environment (TPU with multiple cores, multiple GPUs, ...).
    """
    # Are we in a Google Colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )

        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes,
                master_addr="127.0.0.1",
                master_port=use_port,
                mixed_precision=mixed_precision,
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked "
                            "subprocess. This likely stems from an outside import causing issues once the "
                            "`notebook_launcher()` is called. Please review your imports and test them when "
                            "running the `notebook_launcher()` to identify which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """
    Launches a training function using several processes on CPU for debugging
    purposes.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
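# Usage sketch (editor's addition): from inside a notebook, `training_loop` is
# a hypothetical user-defined function that builds its own `Accelerator`; the
# argument tuple is an assumption for illustration.
#
#   from accelerate import notebook_launcher
#
#   notebook_launcher(training_loop, args=(my_config,), num_processes=2)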
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """
        Try to add the value to the bucket.

        If the bucket is empty or the key is the same, the value is set.
        Otherwise it signals that the bucket is taken.
        """
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
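# Usage sketch (editor's addition) for the open-addressing map above: deleted
# slots are tombstoned with `_deleted` so later probes can still walk past them.
if __name__ == "__main__":
    hash_map = HashMap()
    hash_map["apple"] = 1
    hash_map["banana"] = 2
    del hash_map["apple"]
    print(len(hash_map), hash_map["banana"], list(hash_map))  # 1 2 ['banana']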
from __future__ import annotations


def all_unique(arr: list[int]) -> bool:
    """
    Return True if every element of `arr` appears exactly once.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(arr)) == len(arr)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """
    Lower-upper (LU) decomposition of a square matrix using Doolittle's method:
    the diagonal of the lower factor is fixed to 1.
    """
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
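# Usage sketch (editor's addition): decompose a small matrix and check that
# L @ U reproduces it. The matrix is an arbitrary example with nonzero leading
# principal minors, so a decomposition without pivoting exists.
if __name__ == "__main__":
    a = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(a)
    assert np.allclose(lower @ upper, a)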
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because it predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from __future__ import annotations

from random import random


class Node:
    """
    Treap's node: a treap is a binary tree by value and a heap by priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """
    Split the current tree into two trees with values smaller and greater
    than `value`.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Right tree's root will be the current node.
            # Now we split (with the same value) the current node's left son.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Just symmetric to the previous case.
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """
    Merge two trees into one.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """
    Insert an element: split the current tree with the value into left and
    right subtrees, then merge (left, new node, right) back into the root.
    """
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """
    Erase all nodes with the given value: split off nodes with smaller
    values, split off nodes with greater values, then merge the remainder.
    """
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """
    Just a recursive print of the tree.
    """
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """
    Commands:
    + value to add value to the treap
    - value to erase all nodes with that value
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("goodbye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
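# Usage sketch (editor's addition, non-interactive alternative to `main`):
#
#   root = None
#   for value in (5, 3, 9, 1):
#       root = insert(root, value)
#   inorder(root)   # prints: 1,3,5,9,
#   root = erase(root, 3)
#   inorder(root)   # prints: 1,5,9,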
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowerCAmelCase_ = 4 lowerCAmelCase_ = 3 class __A ( A_ ): '''simple docstring''' pass def __UpperCAmelCase ( __lowerCamelCase ) -> Dict: for shard in shards: for i in range(__lowerCamelCase ): yield {"i": i, "shard": shard} def __UpperCAmelCase ( ) -> Tuple: lowercase__ : int = int(os.environ['''RANK'''] ) lowercase__ : str = int(os.environ['''WORLD_SIZE'''] ) lowercase__ : List[Any] = ArgumentParser() parser.add_argument('''--streaming''' , type=__lowerCamelCase ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase ) parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 ) lowercase__ : int = parser.parse_args() lowercase__ : Optional[Any] = args.streaming lowercase__ : List[Any] = args.num_workers lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]} lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase ) if not streaming: lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) ) lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase ) lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase ) lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD lowercase__ : str = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) lowercase__ : str = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if bit_count < 0: raise ValueError('''The given input must be positive''' ) # get the generated string sequence _UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE ) # # convert them to integers for i in range(len(_SCREAMING_SNAKE_CASE ) ): _UpperCAmelCase = int(sequence[i] , 2 ) return sequence def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] _UpperCAmelCase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits _UpperCAmelCase = gray_code_sequence_string(bit_count - 1 ) _UpperCAmelCase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): _UpperCAmelCase = '''0''' + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): _UpperCAmelCase = '''1''' + smaller_sequence[i] sequence.append(_SCREAMING_SNAKE_CASE ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import logging import os from .state import PartialState class _a ( logging.LoggerAdapter): """simple docstring""" @staticmethod def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]: _UpperCAmelCase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int: if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) _UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase ) _UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase ) if self.isEnabledFor(__UpperCamelCase ): if self._should_log(__UpperCamelCase ): _UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) elif in_order: _UpperCAmelCase = PartialState() for i in range(state.num_processes ): if i == state.process_index: _UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase ) self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) state.wait_for_everyone() def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ): '''simple docstring''' if log_level is None: _UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
def fibonacci(n: int) -> int:
    """
    Computes the Fibonacci number F(n) iteratively.

    >>> fibonacci(12)
    144
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """
    Returns the index of the first Fibonacci number with `n` digits.

    >>> fibonacci_digits_index(3)
    12
    """
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """
    Returns the index of the first term in the Fibonacci sequence to contain
    n digits.
    """
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
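# Worked example (editor's addition): F(12) = 144 is the first Fibonacci
# number with three digits, so the index returned for n=3 is 12.
if __name__ == "__main__":
    assert fibonacci_digits_index(3) == 12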
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Tuple = ['''image_processor''', '''feature_extractor'''] UpperCAmelCase : Dict = '''TvltImageProcessor''' UpperCAmelCase : Tuple = '''TvltFeatureExtractor''' def __init__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ): super().__init__(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase ) _A = image_processor _A = feature_extractor def __call__( self : List[Any] , _UpperCAmelCase : str=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : List[str]=False , *_UpperCAmelCase : Any , **_UpperCAmelCase : str , ): if images is None and audio is None: raise ValueError('You need to specify either an `images` or `audio` input to process.' ) _A = None if images is not None: _A = self.image_processor(_UpperCAmelCase , mask_pixel=_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) if images_mixed is not None: _A = self.image_processor(_UpperCAmelCase , is_mixed=_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) if audio is not None: _A = self.feature_extractor( _UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , mask_audio=_UpperCAmelCase , **_UpperCAmelCase ) _A = {} if audio is not None: output_dict.update(_UpperCAmelCase ) if images is not None: output_dict.update(_UpperCAmelCase ) if images_mixed_dict is not None: output_dict.update(_UpperCAmelCase ) return output_dict @property def lowerCAmelCase_ ( self : Any ): _A = self.image_processor.model_input_names _A = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( _snake_case : Dict ) -> Any: '''simple docstring''' if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def _snake_case ( _snake_case : str ) -> Tuple: '''simple docstring''' for char in word: _A = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' _A = set() for token in tokens: _A = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) _A = list(_snake_case ) return word_list def _snake_case ( _snake_case : List[str] , _snake_case : set() ) -> Optional[Any]: '''simple docstring''' if not chinese_word_set: return bert_tokens _A = max([len(_snake_case ) for w in chinese_word_set] ) _A = bert_tokens _A , _A = 0, len(_snake_case ) while start < end: _A = True if is_chinese(bert_word[start] ): _A = min(end - start , _snake_case ) for i in range(_snake_case , 1 , -1 ): _A = ''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _A = '##' + bert_word[j] _A = start + i _A = False break if single_word: start += 1 return bert_word def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ) -> str: '''simple docstring''' _A = [] for i in range(0 , len(_snake_case ) , 1_00 ): _A = ltp_tokenizer.seg(lines[i : i + 1_00] )[0] _A = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) _A = [] for i in range(0 , len(_snake_case ) , 1_00 ): _A = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=5_12 ) bert_res.extend(res['input_ids'] ) assert len(_snake_case ) == len(_snake_case ) _A = [] for input_ids, chinese_word in zip(_snake_case , _snake_case ): _A = [] for id in input_ids: _A = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) _A = add_sub_symbol(_snake_case , _snake_case ) _A = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_snake_case ): if token[:2] == "##": _A = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def _snake_case ( _snake_case : List[str] ) -> Dict: '''simple docstring''' with open(args.file_name , 'r' , encoding='utf-8' ) as f: _A = f.readlines() _A = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _A = LTP(args.ltp ) # faster in GPU device _A = BertTokenizer.from_pretrained(args.bert ) _A = prepare_ref(_snake_case , _snake_case , _snake_case ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: _A = [json.dumps(_snake_case ) + '\n' for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": a = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') a = parser.parse_args() main(args)
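# Illustration of `add_sub_symbol` (hedged example; the tokens are made up for
# demonstration): if LTP segmented "身高" as one whole word while BERT produced
# two single-character tokens, the trailing character is re-marked as a subword
# so whole-word masking treats the pair as one unit:
#
#   add_sub_symbol(["身", "高"], {"身高"})  ->  ["身", "##高"]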
import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCamelCase : """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , )->List[str]: '''simple docstring''' A_ : str = parent A_ : int = batch_size A_ : List[str] = image_size A_ : Dict = num_channels A_ : Tuple = embeddings_size A_ : Union[str, Any] = hidden_sizes A_ : Dict = depths A_ : str = is_training A_ : Union[str, Any] = use_labels A_ : Union[str, Any] = hidden_act A_ : Optional[Any] = num_labels A_ : Tuple = scope A_ : Optional[int] = len(_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Optional[Any]: '''simple docstring''' A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : str = None if self.use_labels: A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) A_ : Optional[Any] = self.get_config() return config, pixel_values, labels def _snake_case ( self )->Union[str, Any]: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]: '''simple docstring''' A_ : Dict = RegNetModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : Any = model(_SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Union[str, Any]: '''simple docstring''' A_ : Union[str, Any] = self.num_labels A_ : Dict = RegNetForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : int = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self )->Union[str, Any]: '''simple docstring''' A_ : Tuple = self.prepare_config_and_inputs() A_ , A_ , A_ : str = config_and_inputs A_ : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowerCamelCase ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" snake_case = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () 
snake_case = ( {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification} if is_torch_available() else {} ) snake_case = False snake_case = False snake_case = False snake_case = False def _snake_case ( self )->Union[str, Any]: '''simple docstring''' A_ : Union[str, Any] = RegNetModelTester(self ) A_ : Union[str, Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Dict: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self )->Tuple: '''simple docstring''' return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _snake_case ( self )->Dict: '''simple docstring''' pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _snake_case ( self )->str: '''simple docstring''' pass def _snake_case ( self )->List[Any]: '''simple docstring''' A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(_SCREAMING_SNAKE_CASE ) A_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Any = [*signature.parameters.keys()] A_ : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Any: '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Optional[Any]: '''simple docstring''' A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Union[str, Any] = model_class(config=_SCREAMING_SNAKE_CASE ) for name, module in model.named_modules(): if isinstance(_SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def _snake_case ( self )->List[Any]: '''simple docstring''' def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): A_ : str = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): A_ : Tuple = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) A_ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : Optional[int] = self.model_tester.num_stages self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() A_ : int = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in 
layers_type: A_ : int = layer_type A_ : List[Any] = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : str = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Dict: '''simple docstring''' A_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @slow def _snake_case ( self )->str: '''simple docstring''' for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = RegNetModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def _SCREAMING_SNAKE_CASE ( ): A_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _snake_case ( self )->List[str]: '''simple docstring''' return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _snake_case ( self )->Tuple: '''simple docstring''' A_ : List[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = self.default_image_processor A_ : Any = prepare_img() A_ : Optional[Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE ) # verify the logits A_ : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) A_ : Optional[int] = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate (fairseq's fc1 feeds the intermediate projection)
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output (fairseq's fc2 feeds the output projection)
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
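# Example invocation (illustrative; the script name and both paths are placeholders):
#
#   python <this_script>.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl
#
# Add --classification_head when the fairseq checkpoint carries an MNLI head.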
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
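# Minimal usage sketch (illustrative, not part of the module): drive one of the
# schedules above with a throwaway optimizer.
#
#   import torch
#
#   params = [torch.nn.Parameter(torch.zeros(1))]
#   optimizer = torch.optim.AdamW(params, lr=1e-3)
#   scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       optimizer.step()
#       scheduler.step()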
'''simple docstring''' import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def A_ ( snake_case ): SCREAMING_SNAKE_CASE:str = int(__snake_case ) SCREAMING_SNAKE_CASE:Any = t // 3600, (t // 60) % 60, t % 60 return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}''' def A_ ( snake_case , snake_case , snake_case , snake_case , snake_case=300 ): # docstyle-ignore return F''' <div> {prefix} <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress> {label} </div> ''' def A_ ( snake_case ): SCREAMING_SNAKE_CASE:Optional[Any] = "<table border=\"1\" class=\"dataframe\">\n" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F''' <th>{i}</th>\n''' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: SCREAMING_SNAKE_CASE:int = F'''{elt:.6f}''' if isinstance(__snake_case , __snake_case ) else str(__snake_case ) html_code += F''' <td>{elt}</td>\n''' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class _snake_case : _A : List[Any] = 5 _A : Union[str, Any] = 0.2 def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int = None ,SCREAMING_SNAKE_CASE__ : str = True ,SCREAMING_SNAKE_CASE__ : Tuple = None ,SCREAMING_SNAKE_CASE__ : List[Any] = 300 ,): SCREAMING_SNAKE_CASE:int = total SCREAMING_SNAKE_CASE:int = "" if prefix is None else prefix SCREAMING_SNAKE_CASE:Union[str, Any] = leave SCREAMING_SNAKE_CASE:Optional[int] = parent SCREAMING_SNAKE_CASE:Optional[int] = width SCREAMING_SNAKE_CASE:Union[str, Any] = None SCREAMING_SNAKE_CASE:str = None SCREAMING_SNAKE_CASE:List[str] = None def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = False ,SCREAMING_SNAKE_CASE__ : List[str] = None ): SCREAMING_SNAKE_CASE:str = value if comment is not None: SCREAMING_SNAKE_CASE:Optional[Any] = comment if self.last_value is None: SCREAMING_SNAKE_CASE:List[Any] = time.time() SCREAMING_SNAKE_CASE:str = value SCREAMING_SNAKE_CASE:Tuple = None SCREAMING_SNAKE_CASE:Any = self.warmup SCREAMING_SNAKE_CASE:Optional[int] = 1 self.update_bar(_a ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for ,self.total ): if self.first_calls > 0: self.first_calls -= 1 SCREAMING_SNAKE_CASE:Union[str, Any] = time.time() SCREAMING_SNAKE_CASE:Optional[int] = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: SCREAMING_SNAKE_CASE:List[str] = self.elapsed_time / (value - self.start_value) else: SCREAMING_SNAKE_CASE:List[str] = None if value >= self.total: SCREAMING_SNAKE_CASE:int = self.total SCREAMING_SNAKE_CASE:List[str] = None if not self.leave: self.close() elif self.average_time_per_item is not None: SCREAMING_SNAKE_CASE:List[str] = self.average_time_per_item * (self.total - value) self.update_bar(_a ) SCREAMING_SNAKE_CASE:Any = value SCREAMING_SNAKE_CASE:List[str] = current_time if self.average_time_per_item is None: SCREAMING_SNAKE_CASE:int = 1 else: SCREAMING_SNAKE_CASE:Union[str, Any] = max(int(self.update_every / self.average_time_per_item ) ,1 ) def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple=None ): SCREAMING_SNAKE_CASE:List[str] = " " * (len(str(self.total ) ) - len(str(_a ) )) + str(_a ) if self.elapsed_time is None: SCREAMING_SNAKE_CASE:Optional[Any] = F'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: SCREAMING_SNAKE_CASE:Optional[int] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: SCREAMING_SNAKE_CASE:str = ( F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' F''' {format_time(self.predicted_remaining )}''' ) self.label += F''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]''' self.display() def __UpperCamelCase ( self : List[str] ): SCREAMING_SNAKE_CASE:Dict = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: SCREAMING_SNAKE_CASE:Any = disp.display(disp.HTML(self.html_code ) ,display_id=_a ) else: self.output.update(disp.HTML(self.html_code ) ) def __UpperCamelCase ( self : Union[str, Any] ): if self.parent is None and self.output is not None: self.output.update(disp.HTML("" ) ) class _snake_case ( a_ ): def __init__( self : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ): super().__init__(_a ) SCREAMING_SNAKE_CASE:Optional[Any] = None if column_names is None else [column_names] SCREAMING_SNAKE_CASE:str = None def __UpperCamelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE:List[str] = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: SCREAMING_SNAKE_CASE:str = disp.display(disp.HTML(self.html_code ) ,display_id=_a ) else: self.output.update(disp.HTML(self.html_code ) ) def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Dict ): if self.inner_table is None: SCREAMING_SNAKE_CASE:Any = [list(values.keys() ), list(values.values() )] else: SCREAMING_SNAKE_CASE:Optional[int] = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(_a ) SCREAMING_SNAKE_CASE:Optional[int] = columns self.inner_table.append([values[c] for c in columns] ) def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=300 ): SCREAMING_SNAKE_CASE:Optional[Any] = NotebookProgressBar(_a ,prefix=_a 
,parent=self ,width=_a ) return self.child_bar def __UpperCamelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE:Union[str, Any] = None self.display() class _snake_case ( a_ ): def __init__( self : str ): SCREAMING_SNAKE_CASE:Union[str, Any] = None SCREAMING_SNAKE_CASE:List[Any] = None SCREAMING_SNAKE_CASE:Tuple = False def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ): SCREAMING_SNAKE_CASE:int = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" SCREAMING_SNAKE_CASE:int = 0 SCREAMING_SNAKE_CASE:Union[str, Any] = 0 SCREAMING_SNAKE_CASE:str = [self.first_column] + ["Training Loss"] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("Validation Loss" ) SCREAMING_SNAKE_CASE:List[Any] = NotebookTrainingTracker(state.max_steps ,_a ) def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : Any ): SCREAMING_SNAKE_CASE:int = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}''' self.training_tracker.update( state.global_step + 1 ,comment=F'''Epoch {epoch}/{state.num_train_epochs}''' ,force_update=self._force_next_update ,) SCREAMING_SNAKE_CASE:Union[str, Any] = False def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict=None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ): if not has_length(_a ): return if self.prediction_bar is None: if self.training_tracker is not None: SCREAMING_SNAKE_CASE:Dict = self.training_tracker.add_child(len(_a ) ) else: SCREAMING_SNAKE_CASE:Union[str, Any] = NotebookProgressBar(len(_a ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ): if self.prediction_bar is not None: self.prediction_bar.close() SCREAMING_SNAKE_CASE:Any = None def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : str=None ,**SCREAMING_SNAKE_CASE__ : int ): # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: SCREAMING_SNAKE_CASE:Any = {"Training Loss": logs["loss"]} # First column is necessarily Step sine we're not in epoch eval strategy SCREAMING_SNAKE_CASE:Dict = state.global_step self.training_tracker.write_line(_a ) def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int=None ,**SCREAMING_SNAKE_CASE__ : int ): if self.training_tracker is not None: SCREAMING_SNAKE_CASE:Dict = {"Training Loss": "No log", "Validation Loss": "No log"} for log in reversed(state.log_history ): if "loss" in log: SCREAMING_SNAKE_CASE:Dict = log["loss"] break if self.first_column == "Epoch": SCREAMING_SNAKE_CASE:Tuple = int(state.epoch ) else: SCREAMING_SNAKE_CASE:Optional[int] = state.global_step SCREAMING_SNAKE_CASE:Any = "eval" for k in metrics: if k.endswith("_loss" ): SCREAMING_SNAKE_CASE:Tuple = re.sub(R"\_loss$" ,"" ,_a ) SCREAMING_SNAKE_CASE:int = 
metrics.pop("total_flos" ,_a ) SCREAMING_SNAKE_CASE:Any = metrics.pop("epoch" ,_a ) SCREAMING_SNAKE_CASE:Any = metrics.pop(F'''{metric_key_prefix}_runtime''' ,_a ) SCREAMING_SNAKE_CASE:Any = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' ,_a ) SCREAMING_SNAKE_CASE:List[Any] = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' ,_a ) SCREAMING_SNAKE_CASE:int = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' ,_a ) for k, v in metrics.items(): if k == F'''{metric_key_prefix}_loss''': SCREAMING_SNAKE_CASE:Optional[int] = v else: SCREAMING_SNAKE_CASE:List[str] = k.split("_" ) SCREAMING_SNAKE_CASE:Tuple = " ".join([part.capitalize() for part in splits[1:]] ) SCREAMING_SNAKE_CASE:Any = v self.training_tracker.write_line(_a ) self.training_tracker.remove_child() SCREAMING_SNAKE_CASE:List[Any] = None # Evaluation takes a long time so we should force the next update. SCREAMING_SNAKE_CASE:Union[str, Any] = True def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : int ): self.training_tracker.update( state.global_step ,comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' ,force_update=_a ) SCREAMING_SNAKE_CASE:Union[str, Any] = None
"""simple docstring""" def __magic_name__ ( __snake_case : int , __snake_case : int , __snake_case : int ) -> float: lowercase : List[Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def __magic_name__ ( ) -> int: print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) def __lowerCamelCase ( lowerCamelCase__ : Optional[int] ): '''simple docstring''' lowerCamelCase = DPTConfig(embedding_type="""hybrid""" ) if "large" in checkpoint_url: lowerCamelCase = 1024 lowerCamelCase = 4096 lowerCamelCase = 24 lowerCamelCase = 16 lowerCamelCase = [5, 11, 17, 23] lowerCamelCase = [256, 512, 1024, 1024] lowerCamelCase = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: lowerCamelCase = 768 lowerCamelCase = [1, 1, 1, 0.5] lowerCamelCase = [256, 512, 768, 768] lowerCamelCase = 150 lowerCamelCase = 16 lowerCamelCase = (1, 384, 384) lowerCamelCase = False lowerCamelCase = """project""" if "ade" in checkpoint_url: lowerCamelCase = True lowerCamelCase = 768 lowerCamelCase = [1, 1, 1, 0.5] lowerCamelCase = 150 lowerCamelCase = 16 lowerCamelCase = """huggingface/label-files""" lowerCamelCase = """ade20k-id2label.json""" lowerCamelCase = json.load(open(cached_download(hf_hub_url(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) ) , """r""" ) ) lowerCamelCase = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} lowerCamelCase = idalabel lowerCamelCase = {v: k for k, v in idalabel.items()} lowerCamelCase = [1, 150, 480, 480] return config, expected_shape def __lowerCamelCase ( lowerCamelCase__ : str ): '''simple docstring''' lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(lowerCamelCase__ , lowerCamelCase__ ) def __lowerCamelCase ( lowerCamelCase__ : str ): '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" ) if "pretrained.model" in name: lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" ) if "patch_embed" in name: lowerCamelCase = name.replace("""patch_embed""" , """""" ) if "pos_embed" in name: lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" ) if "attn.proj" in name: lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" ) if "proj" in name and "project" not in name: lowerCamelCase = name.replace("""proj""" , """projection""" ) if "blocks" in name: lowerCamelCase = name.replace("""blocks""" , """layer""" ) if "mlp.fc1" in name: lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" ) if "norm1" in name and "backbone" not in name: lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name and "backbone" not in name: lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" ) if "scratch.output_conv" in name: lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" ) if "scratch" in name: lowerCamelCase = name.replace("""scratch""" , """neck""" ) if "layer1_rn" in name: lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" ) if "layer2_rn" in name: lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" ) if "layer3_rn" in name: lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" ) 
if "layer4_rn" in name: lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" ) if "refinenet" in name: lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: lowerCamelCase = name.replace("""out_conv""" , """projection""" ) if "resConfUnit1" in name: lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" ) if "resConfUnit2" in name: lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" ) if "conv1" in name: lowerCamelCase = name.replace("""conv1""" , """convolution1""" ) if "conv2" in name: lowerCamelCase = name.replace("""conv2""" , """convolution2""" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" ) if "pretrained.act_postprocess1.4" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" ) if "pretrained.act_postprocess2.3" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" ) if "pretrained.act_postprocess2.4" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" ) if "pretrained.act_postprocess3.3" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" ) if "pretrained.act_postprocess4.3" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" ) if "pretrained.act_postprocess4.4" in name: lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" ) if "pretrained" in name: lowerCamelCase = name.replace("""pretrained""" , """dpt""" ) if "bn" in name: lowerCamelCase = name.replace("""bn""" , """batch_norm""" ) if "head" in name: lowerCamelCase = name.replace("""head""" , """head.head""" ) if "encoder.norm" in name: lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" ) if "auxlayer" in name: lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" ) if "backbone" in name: lowerCamelCase = name.replace("""backbone""" , """backbone.bit.encoder""" ) if ".." 
in name: lowerCamelCase = name.replace("""..""" , """.""" ) if "stem.conv" in name: lowerCamelCase = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase = name.replace("""blocks""" , """layers""" ) if "convolution" in name and "backbone" in name: lowerCamelCase = name.replace("""convolution""" , """conv""" ) if "layer" in name and "backbone" in name: lowerCamelCase = name.replace("""layer""" , """layers""" ) if "backbone.bit.encoder.bit" in name: lowerCamelCase = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" ) if "embedder.conv" in name: lowerCamelCase = name.replace("""embedder.conv""" , """embedder.convolution""" ) if "backbone.bit.encoder.stem.norm" in name: lowerCamelCase = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" ) return name def __lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' ) lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase = in_proj_weight[: config.hidden_size, :] lowerCamelCase = in_proj_bias[: config.hidden_size] lowerCamelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase = in_proj_bias[-config.hidden_size :] def __lowerCamelCase ( ): '''simple docstring''' lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ): '''simple docstring''' lowerCamelCase , lowerCamelCase = get_dpt_config(lowerCamelCase__ ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCamelCase = torch.load(lowerCamelCase__ , map_location="""cpu""" ) # remove certain keys remove_ignore_keys_(lowerCamelCase__ ) # rename keys for key in state_dict.copy().keys(): lowerCamelCase = state_dict.pop(lowerCamelCase__ ) lowerCamelCase = val # read in qkv matrices read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ ) # load HuggingFace model lowerCamelCase = DPTForSemanticSegmentation(lowerCamelCase__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # Check outputs on an image lowerCamelCase = 480 if """ade""" in checkpoint_url else 384 lowerCamelCase = DPTImageProcessor(size=lowerCamelCase__ ) lowerCamelCase = prepare_img() lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors="""pt""" ) # forward pass lowerCamelCase = model(**lowerCamelCase__ ).logits if """ade""" in checkpoint_url else model(**lowerCamelCase__ ).predicted_depth if show_prediction: lowerCamelCase = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=lowerCamelCase__ , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() 
if pytorch_dump_folder_path is not None: Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: model.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" ) if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt", type=str, help="URL of the original DPT checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", ) parser.add_argument( "--model_name", default="dpt-large", type=str, help="Name of the model, in case you're pushing to the hub.", ) parser.add_argument( "--show_prediction", action="store_true", ) UpperCAmelCase : Tuple = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
UpperCAmelCase : Tuple = "Tobias Carryer" from time import time class __lowercase : """simple docstring""" def __init__( self , A , A , A , A=int(time() ) ) -> Optional[int]: # noqa: B008 '''simple docstring''' lowerCamelCase = multiplier lowerCamelCase = increment lowerCamelCase = modulo lowerCamelCase = seed def __A ( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. UpperCAmelCase : List[Any] = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31) while True: print(lcg.next_number())
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase__ : str = get_tests_dir('fixtures') lowerCAmelCase__ : List[str] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowerCAmelCase__ : List[Any] = get_tests_dir('fixtures/dummy-config.json') class snake_case ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = 0 def __lowerCAmelCase ( self : List[str] ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def __lowerCAmelCase ( self : Tuple ): with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict() config_dict.pop('feature_extractor_type' ) UpperCAmelCase__ = WavaVecaFeatureExtractor(**lowerCamelCase__ ) # save in new folder model_config.save_pretrained(lowerCamelCase__ ) config.save_pretrained(lowerCamelCase__ ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) # make sure private variable is not incorrectly saved UpperCAmelCase__ = json.loads(config.to_json_string() ) self.assertTrue('_processor_class' not in dict_as_saved ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def __lowerCAmelCase ( self : List[str] ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def __lowerCAmelCase ( self : Union[str, Any] ): with self.assertRaisesRegex( lowerCamelCase__ ,'bert-base is not a local folder and is not a valid model identifier' ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('bert-base' ) def __lowerCAmelCase ( self : List[Any] ): with self.assertRaisesRegex( lowerCamelCase__ ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ,revision='aaaaaa' ) def __lowerCAmelCase ( self : List[str] ): with self.assertRaisesRegex( lowerCamelCase__ ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' ) def __lowerCAmelCase ( self : int ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCamelCase__ ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase__ ): UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'NewFeatureExtractor' ) def __lowerCAmelCase ( self : str ): try: AutoConfig.register('custom' ,lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ ,lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoFeatureExtractor.register(lowerCamelCase__ ,lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase__ = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase__ ) UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self : Optional[int] ): class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = True try: AutoConfig.register('custom' ,lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ ,lowerCamelCase__ ) # If remote code is not set, the default is to use local UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' ) self.assertTrue(not hasattr(lowerCamelCase__ ,'is_local' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = (PNDMScheduler,) snake_case__ = (("num_inference_steps", 50),) def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ): UpperCAmelCase__ = { 'num_train_timesteps': 1_000, 'beta_start': 0.0_0_0_1, 'beta_end': 0.0_2, 'beta_schedule': 'linear', } config.update(**lowerCamelCase__ ) return config def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ): UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ ) UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase__ ) UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ ) new_scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self : Tuple ): pass def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ): UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCamelCase__ ) UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCamelCase__ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - 
new_output ) ) < 1e-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ ) UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ ) UpperCAmelCase__ = 10 UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(lowerCamelCase__ ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample return sample def __lowerCAmelCase ( self : int ): UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ): scheduler.set_timesteps(lowerCamelCase__ ) elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ): UpperCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def __lowerCAmelCase ( self : List[Any] ): for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=lowerCamelCase__ ) def __lowerCAmelCase ( self : Optional[int] ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowerCamelCase__ ) UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps ,torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,) def __lowerCAmelCase ( self : Dict ): for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ ) def __lowerCAmelCase ( 
self : Union[str, Any] ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCamelCase__ ) def __lowerCAmelCase ( self : List[Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCamelCase__ ) def __lowerCAmelCase ( self : Optional[Any] ): for t in [1, 5, 10]: self.check_over_forward(time_step=lowerCamelCase__ ) def __lowerCAmelCase ( self : List[Any] ): for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ): self.check_over_forward(num_inference_steps=lowerCamelCase__ ) def __lowerCAmelCase ( self : int ): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ ) scheduler.set_timesteps(lowerCamelCase__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample def __lowerCAmelCase ( self : int ): with self.assertRaises(lowerCamelCase__ ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ ) scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample def __lowerCAmelCase ( self : Tuple ): UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2 assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3 def __lowerCAmelCase ( self : Tuple ): UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' ) UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2 assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3 def __lowerCAmelCase ( self : Union[str, Any] ): # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 ) UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2 assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3 def __lowerCAmelCase ( self : Tuple ): # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 ) UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) ) UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) ) assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2 assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput snake_case__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : Optional[int] = [image] if isinstance(image[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = image[0].size lowerCAmelCase, lowerCAmelCase : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowerCAmelCase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] lowerCAmelCase : int = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Optional[Any] = np.array(_snake_case ).astype(np.floataa ) / 255.0 lowerCAmelCase : List[Any] = image.transpose(0 , 3 , 1 , 2 ) lowerCAmelCase : List[str] = 2.0 * image - 1.0 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): lowerCAmelCase : Any = torch.cat(_snake_case , dim=0 ) return image def _snake_case ( _snake_case : Union[List, PIL.Image.Image, torch.Tensor] ): if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): lowerCAmelCase : str = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowerCAmelCase, lowerCAmelCase : int = mask[0].size lowerCAmelCase, lowerCAmelCase : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] lowerCAmelCase : Optional[int] = np.concatenate(_snake_case , axis=0 ) lowerCAmelCase : Dict = mask.astype(np.floataa ) / 255.0 lowerCAmelCase : List[str] = 0 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): lowerCAmelCase : Optional[int] = torch.cat(_snake_case , dim=0 ) return mask class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = 42 def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : Union[torch.Tensor, PIL.Image.Image] , UpperCamelCase_ : int = 2_5_0 , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = image lowerCAmelCase : Tuple = _preprocess_image(UpperCamelCase_ ) lowerCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype ) lowerCAmelCase : Optional[Any] = _preprocess_mask(UpperCamelCase_ ) lowerCAmelCase : str = mask_image.to(device=self.device , 
dtype=self.unet.dtype ) lowerCAmelCase : Union[str, Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : Union[str, Any] = original_image.shape lowerCAmelCase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.device ) lowerCAmelCase : Optional[int] = eta lowerCAmelCase : List[str] = self.scheduler.timesteps[0] + 1 lowerCAmelCase : List[str] = generator[0] if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # compute previous image: x_t -> x_t-1 lowerCAmelCase : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowerCAmelCase : Optional[Any] = self.scheduler.undo_step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = t lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : Tuple = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
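# Usage sketch, following the diffusers RePaint docs (the checkpoint id comes from
# those docs; original_image and mask_image are PIL images supplied by the caller):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler).to("cuda")
#   output = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250,
#                 eta=0.0, jump_length=10, jump_n_sample=10)
#   inpainted = output.images[0]
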
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class snake_case_( a__ ): __UpperCamelCase = (DDPMScheduler,) def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : Optional[Any] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCamelCase_ ) return config def lowerCamelCase__ ( self : Optional[int] ): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def lowerCamelCase__ ( self : Tuple ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.scheduler_classes[0] lowerCAmelCase : Dict = self.get_scheduler_config() lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5 def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ) lowerCAmelCase : List[str] = self.dummy_model() lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : Union[str, Any] = pred_prev_sample lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[int] = self.scheduler_classes[0] lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Dict = len(UpperCamelCase_ ) lowerCAmelCase : Any = self.dummy_model() lowerCAmelCase : Any = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : List[Any] = pred_prev_sample lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Dict = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase_ ) lowerCAmelCase : Dict = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase_ ): if i == len(UpperCamelCase_ ) - 1: lowerCAmelCase : List[Any] = -1 else: lowerCAmelCase : Union[str, Any] = timesteps[i + 1] lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ ) lowerCAmelCase : Dict = prev_t.item() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = self.scheduler_classes[0] lowerCAmelCase : Optional[int] = self.get_scheduler_config() lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0] lowerCAmelCase : int = len(UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() 
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=UpperCamelCase_ )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs

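# Usage sketch (assumptions: the tokenizer id comes from the archive map above and
# the ONNX config is used with its default task; since type_vocab_size defaults
# to 0, generate_dummy_inputs drops token_type_ids):
#
#   from transformers import AutoTokenizer
#
#   config = DebertaV2Config()
#   onnx_config = DebertaV2OnnxConfig(config)
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#   dummy_inputs = onnx_config.generate_dummy_inputs(preprocessor=tokenizer)
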
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch

from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()

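# Example invocation (a sketch with placeholder paths; the flags mirror the
# argument parser above, distilling a BERT teacher into a DistilBERT student
# with the MLM objective, so alpha_clm is set to 0):
#
#   python train.py \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#       --dump_path serialization_dir/my_first_distillation \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --force
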
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)

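# Usage sketch: because of the _LazyModule registration above, the heavy
# submodules are only materialized on first attribute access (assuming torch
# and vision dependencies are installed):
#
#   from transformers import PoolFormerConfig, PoolFormerModel
#   model = PoolFormerModel(PoolFormerConfig())
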
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A logging adapter that only emits records on the processes selected by the caller."""

    @staticmethod
    def _should_log(main_process_only):
        # Log on every process unless `main_process_only` restricts it to the main one.
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})

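# Usage sketch (mirrors the accelerate docs; the logger name is illustrative):
#
#   from accelerate.logging import get_logger
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("Logged once, on the main process only")
#   logger.info("Logged by every process, in rank order", main_process_only=False, in_order=True)
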
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler

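# Usage sketch: all schedulers above share a config format, so one can be swapped
# for another inside a pipeline (the checkpoint id is illustrative):
#
#   from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
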
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")

"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A: int = logging.get_logger(__name__) A: str = { "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json", # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : List[Any] = 'vit_msn' def __init__( self , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-06 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> Dict: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : int = num_attention_heads UpperCAmelCase : Union[str, Any] = intermediate_size UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : Union[str, Any] = hidden_dropout_prob UpperCAmelCase : List[str] = attention_probs_dropout_prob UpperCAmelCase : Optional[Any] = initializer_range UpperCAmelCase : List[Any] = layer_norm_eps UpperCAmelCase : List[str] = image_size UpperCAmelCase : Optional[int] = patch_size UpperCAmelCase : str = num_channels UpperCAmelCase : Union[str, Any] = qkv_bias
"""simple docstring""" import math import sys def _snake_case ( UpperCamelCase : str ): UpperCAmelCase : Dict = """""" try: with open(UpperCamelCase , """rb""" ) as binary_file: UpperCAmelCase : str = binary_file.read() for dat in data: UpperCAmelCase : List[Any] = F"{dat:08b}" result += curr_byte return result except OSError: print("""File not accessible""" ) sys.exit() def _snake_case ( UpperCamelCase : str ): UpperCAmelCase : Optional[int] = {"""0""": """0""", """1""": """1"""} UpperCAmelCase , UpperCAmelCase : Optional[int] = """""", """""" UpperCAmelCase : int = len(UpperCamelCase ) for i in range(len(UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue UpperCAmelCase : Any = lexicon[curr_string] result += last_match_id UpperCAmelCase : Any = last_match_id + """0""" if math.loga(UpperCamelCase ).is_integer(): UpperCAmelCase : Optional[Any] = {} for curr_key in list(UpperCamelCase ): UpperCAmelCase : Dict = lexicon.pop(UpperCamelCase ) UpperCAmelCase : int = new_lex UpperCAmelCase : int = last_match_id + """1""" index += 1 UpperCAmelCase : List[str] = """""" return result def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ): UpperCAmelCase : Dict = 8 try: with open(UpperCamelCase , """wb""" ) as opened_file: UpperCAmelCase : Union[str, Any] = [ to_write[i : i + byte_length] for i in range(0 , len(UpperCamelCase ) , UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("""10000000""" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(UpperCamelCase , 2 ).to_bytes(1 , byteorder="""big""" ) ) except OSError: print("""File not accessible""" ) sys.exit() def _snake_case ( UpperCamelCase : str ): UpperCAmelCase : Any = 0 for letter in data_bits: if letter == "1": break counter += 1 UpperCAmelCase : List[str] = data_bits[counter:] UpperCAmelCase : Tuple = data_bits[counter + 1 :] return data_bits def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ): UpperCAmelCase : int = read_file_binary(UpperCamelCase ) UpperCAmelCase : str = remove_prefix(UpperCamelCase ) UpperCAmelCase : Any = decompress_data(UpperCamelCase ) write_file_binary(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
"""simple docstring""" class lowerCAmelCase__ : # Public class to implement a graph '''simple docstring''' def __init__( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = row SCREAMING_SNAKE_CASE_ : Tuple = col SCREAMING_SNAKE_CASE_ : Union[str, Any] = graph def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]]): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order SCREAMING_SNAKE_CASE_ : Tuple = [-1, 0, 1, -1, 1, -1, 0, 1] SCREAMING_SNAKE_CASE_ : Tuple = True # Make those cells visited for k in range(8): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , lowercase_): self.diffs(i + row_nbr[k] , j + col_nbr[k] , lowercase_) def _SCREAMING_SNAKE_CASE ( self : int): # And finally, count all islands. '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = [[False for j in range(self.COL)] for i in range(self.ROW)] SCREAMING_SNAKE_CASE_ : Dict = 0 for i in range(self.ROW): for j in range(self.COL): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(lowercase_ , lowercase_ , lowercase_) count += 1 return count
"""simple docstring""" from __future__ import annotations class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : int = 0): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[Any] = key def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(lowercase_) ^ key) for ch in content] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[str] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) SCREAMING_SNAKE_CASE_ : Union[str, Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned SCREAMING_SNAKE_CASE_ : List[Any] = '''''' for ch in content: ans += chr(ord(lowercase_) ^ key) return ans def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int = 0): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''encrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(lowercase_ , lowercase_)) except OSError: return False return True def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : int): '''simple docstring''' assert isinstance(lowercase_ , lowercase_) and isinstance(lowercase_ , lowercase_) try: with open(lowercase_) as fin, open('''decrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(lowercase_ , lowercase_)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
import numpy as np
from PIL import Image


def maxpooling(arr, size, stride):
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr, size, stride):
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
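# A worked example for the pooling functions above: a 4x4 input, a 2x2 window
# and stride 2 give a 2x2 output (avgpooling truncates each average to int).
sample = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
print(maxpooling(sample, 2, 2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(sample, 2, 2))  # [[ 3.  5.] [11. 13.]]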
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
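# The `_import_structure` mapping above is what makes the package import
# lazily: a heavy submodule such as `modeling_xlnet` is only loaded when one
# of its names is first requested. A sketch, assuming transformers and torch
# are installed:
from transformers import XLNetConfig, XLNetModel  # this triggers the lazy load

model = XLNetModel(XLNetConfig())  # randomly initialized XLNet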
lowercase = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} lowercase = ["a", "b", "c", "d", "e"] def __UpperCAmelCase ( a_ , a_ , a_): snake_case_ = start # add current to visited visited.append(a_) snake_case_ = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: snake_case_ = topological_sort(a_ , a_ , a_) # if all neighbors visited add current to sort sort.append(a_) # if all vertices haven't been visited select a new one to visit if len(a_) != len(a_): for vertice in vertices: if vertice not in visited: snake_case_ = topological_sort(a_ , a_ , a_) # return sort return sort if __name__ == "__main__": lowercase = topological_sort("a", [], []) print(sort)
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class UpperCamelCase_ : '''simple docstring''' def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=32 , a=5 , a=4 , a=4 , a="gelu" , a=0.0 , a=0.1 , a=True , a=5_12 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Optional[Any]: snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_multiple_size snake_case_ = hidden_act snake_case_ = hidden_dropout snake_case_ = attention_dropout snake_case_ = weight_tying snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def _UpperCamelCase ( self ) -> List[Any]: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCamelCase ( self ) -> Dict: return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) def _UpperCamelCase ( self ) -> int: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs() snake_case_ = True return config, input_ids, input_mask, token_labels def _UpperCamelCase ( self , a , a , a ) -> Any: snake_case_ = GPTNeoXJapaneseModel(config=a ) model.to(a ) model.eval() snake_case_ = model(a , attention_mask=a ) snake_case_ = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self , a , a , a ) -> Union[str, Any]: snake_case_ = True snake_case_ = GPTNeoXJapaneseModel(a ) model.to(a ) model.eval() snake_case_ = model(a , attention_mask=a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self , a , a , a , a ) -> int: snake_case_ = GPTNeoXJapaneseForCausalLM(config=a ) model.to(a ) model.eval() snake_case_ = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self , a , a , a ) -> Tuple: snake_case_ = True snake_case_ = GPTNeoXJapaneseForCausalLM(config=a ) model.to(a ) model.eval() # first forward pass snake_case_ = model(a , attention_mask=a , use_cache=a ) snake_case_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case_ = model(a , attention_mask=a , output_hidden_states=a ) snake_case_ = output_from_no_past['hidden_states'][0] snake_case_ = model( a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0] # select random slice snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) ) def _UpperCamelCase ( self ) -> Dict: snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () lowerCAmelCase = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () lowerCAmelCase = ( {'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = False def _UpperCamelCase ( self ) -> List[Any]: snake_case_ = GPTNeoXJapaneseModelTester(self ) snake_case_ = ConfigTester(self , config_class=a , hidden_size=37 ) def _UpperCamelCase ( self ) -> str: self.config_tester.run_common_tests() def _UpperCamelCase ( self ) -> Optional[Any]: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(a , a , a ) def _UpperCamelCase ( self ) -> Union[str, Any]: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(a , a , a ) def _UpperCamelCase ( self ) -> Optional[int]: # This regression test was failing with PyTorch < 1.3 snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case_ = None self.model_tester.create_and_check_model_as_decoder(a , a , a ) def _UpperCamelCase ( self ) -> Dict: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a ) def _UpperCamelCase ( self ) -> List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*a ) @slow def _UpperCamelCase ( self ) -> Any: snake_case_ = 'abeja/gpt-neox-japanese-2.7b' snake_case_ = ['データサイエンティストとは、', 
'100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、'] snake_case_ = [ 'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。', '100年後に必要とされる会社は、「人」が中心の会社です。', 'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。', '国境の長いトンネルを抜けると、そこは雪国だった。', '美味しい日本食といえば、やっぱりお寿司ですよね。', ] snake_case_ = GPTNeoXJapaneseTokenizer.from_pretrained(a ) snake_case_ = GPTNeoXJapaneseForCausalLM.from_pretrained(a ) snake_case_ = [] for prompt in prompts: snake_case_ = tokenizer(a , return_tensors='pt' ).input_ids snake_case_ = model.generate(a , max_length=50 ) snake_case_ = tokenizer.batch_decode(a , skip_special_tokens=a ) predicted_outputs += generated_string self.assertListEqual(a , a )
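# A standalone generation sketch mirroring the slow integration test above
# (assumes torch plus access to the abeja/gpt-neox-japanese-2.7b checkpoint):
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer

name = "abeja/gpt-neox-japanese-2.7b"
tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(name)
model = GPTNeoXJapaneseForCausalLM.from_pretrained(name)
input_ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, max_length=50)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])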
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class _a ( unittest.TestCase): def __init__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=7 , _SCREAMING_SNAKE_CASE : Tuple=3 , _SCREAMING_SNAKE_CASE : Any=18 , _SCREAMING_SNAKE_CASE : str=30 , _SCREAMING_SNAKE_CASE : List[str]=400 , _SCREAMING_SNAKE_CASE : List[Any]=True , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : List[str]=True , )-> Any: lowerCAmelCase__ : int = size if size is not None else {'height': 18, 'width': 18} lowerCAmelCase__ : List[str] = parent lowerCAmelCase__ : Optional[Any] = batch_size lowerCAmelCase__ : Optional[int] = num_channels lowerCAmelCase__ : Dict = image_size lowerCAmelCase__ : int = min_resolution lowerCAmelCase__ : Tuple = max_resolution lowerCAmelCase__ : List[str] = do_resize lowerCAmelCase__ : Dict = size lowerCAmelCase__ : Dict = do_normalize def UpperCAmelCase__( self : List[str] )-> List[Any]: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804], [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class _a ( _lowercase , unittest.TestCase): _a : Optional[int] = ImageGPTImageProcessor if is_vision_available() else None def UpperCAmelCase__( self : Dict )-> Optional[Any]: lowerCAmelCase__ : Dict = ImageGPTImageProcessingTester(self ) @property def UpperCAmelCase__( self : str )-> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__( self : Any )-> Optional[int]: lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''clusters''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) def UpperCAmelCase__( self : List[Any] )-> Any: lowerCAmelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) lowerCAmelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def UpperCAmelCase__( self : Optional[Any] )-> int: lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) lowerCAmelCase__ : Optional[int] = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(_A , obj[key] ) ) else: self.assertEqual(obj[key] , _A ) def UpperCAmelCase__( self : Any )-> Optional[int]: lowerCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ : List[Any] = os.path.join(_A , '''image_processor.json''' ) 
image_processor_first.to_json_file(_A ) lowerCAmelCase__ : Optional[Any] = self.image_processing_class.from_json_file(_A ).to_dict() lowerCAmelCase__ : Optional[int] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_A , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , _A ) def UpperCAmelCase__( self : int )-> Tuple: lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(_A ) lowerCAmelCase__ : Optional[int] = self.image_processing_class.from_pretrained(_A ).to_dict() lowerCAmelCase__ : Any = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_A , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , _A ) @unittest.skip('''ImageGPT requires clusters at initialization''' ) def UpperCAmelCase__( self : Dict )-> int: pass def lowerCamelCase_ ( ): """simple docstring""" lowerCAmelCase__ : Tuple = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) lowerCAmelCase__ : List[str] = Image.open(dataset[4]['''file'''] ) lowerCAmelCase__ : List[Any] = Image.open(dataset[5]['''file'''] ) lowerCAmelCase__ : Any = [imagea, imagea] return images @require_vision @require_torch class _a ( unittest.TestCase): @slow def UpperCAmelCase__( self : Optional[Any] )-> Any: lowerCAmelCase__ : Optional[int] = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' ) lowerCAmelCase__ : Union[str, Any] = prepare_images() # test non-batched lowerCAmelCase__ : List[Any] = image_processing(images[0] , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1024) ) lowerCAmelCase__ : int = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , _A ) # test batched lowerCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1024) ) lowerCAmelCase__ : Any = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , _A )
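# A usage sketch mirroring the integration test above: the ImageGPT processor
# quantizes pixels into discrete color-cluster ids, 1024 tokens per image for
# the small checkpoint (assumes torch, Pillow and access to the checkpoint).
from PIL import Image
from transformers import ImageGPTImageProcessor

processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
image = Image.new("RGB", (64, 64), color="red")
encoding = processor(image, return_tensors="pt")
print(encoding.input_ids.shape)  # torch.Size([1, 1024])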
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""
Project Euler Problem 12: find the first triangle number with over 500 divisors.
"""


def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number to have over five hundred divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
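# Sanity checks for count_divisors above: 28 = 2**2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28); 76576500 is the
# well-known answer to this problem, with over 500 divisors.
assert count_divisors(28) == 6
assert count_divisors(76576500) > 500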
"""simple docstring""" import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase_ : Any = { """microsoft/conditional-detr-resnet-50""": ( """https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json""" ), } class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = "conditional_detr" __lowerCAmelCase = ["past_key_values"] __lowerCAmelCase = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __A=True , __A=None , __A=3 , __A=300 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.02 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=2 , __A=5 , __A=2 , __A=1 , __A=1 , __A=2 , __A=5 , __A=2 , __A=0.25 , **__A , ) -> List[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) a =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(__A , __A ): a =backbone_config.get('''model_type''' ) a =CONFIG_MAPPING[backbone_model_type] a =config_class.from_dict(__A ) a =use_timm_backbone a =backbone_config a =num_channels a =num_queries a =d_model a =encoder_ffn_dim a =encoder_layers a =encoder_attention_heads a =decoder_ffn_dim a =decoder_layers a =decoder_attention_heads a =dropout a =attention_dropout a =activation_dropout a =activation_function a =init_std a =init_xavier_std a =encoder_layerdrop a =decoder_layerdrop a =encoder_layers a =auxiliary_loss a =position_embedding_type a =backbone a =use_pretrained_backbone a =dilation # Hungarian matcher a =class_cost a =bbox_cost a =giou_cost # Loss coefficients a =mask_loss_coefficient a =dice_loss_coefficient a =cls_loss_coefficient a =bbox_loss_coefficient a =giou_loss_coefficient a =focal_alpha super().__init__(is_encoder_decoder=__A , **__A ) @property def SCREAMING_SNAKE_CASE ( self ) -> int: return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE ( self ) -> int: return self.d_model def SCREAMING_SNAKE_CASE ( self ) -> Tuple: a =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: a =self.backbone_config.to_dict() a =self.__class__.model_type return output class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = version.parse("1.11" ) @property def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def SCREAMING_SNAKE_CASE ( self ) -> float: return 1E-5 @property def SCREAMING_SNAKE_CASE ( self ) -> int: return 12
"""simple docstring""" from ... import PretrainedConfig __A : Dict = { 'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json', } class __UpperCamelCase ( _A ): SCREAMING_SNAKE_CASE = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP SCREAMING_SNAKE_CASE = "nezha" def __init__(self : Any , __SCREAMING_SNAKE_CASE : Optional[int]=2_1_1_2_8 , __SCREAMING_SNAKE_CASE : Dict=7_6_8 , __SCREAMING_SNAKE_CASE : Optional[int]=1_2 , __SCREAMING_SNAKE_CASE : Dict=1_2 , __SCREAMING_SNAKE_CASE : List[str]=3_0_7_2 , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=5_1_2 , __SCREAMING_SNAKE_CASE : Optional[Any]=6_4 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Tuple=0.0_2 , __SCREAMING_SNAKE_CASE : Dict=1E-12 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : str=True , **__SCREAMING_SNAKE_CASE : Optional[Any] , ): super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = max_relative_position A = type_vocab_size A = initializer_range A = layer_norm_eps A = classifier_dropout A = use_cache
57
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() __A : int = logging.get_logger(__name__) __A : Optional[int] = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] __A : Union[str, Any] = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def __SCREAMING_SNAKE_CASE ( lowercase__ ): """simple docstring""" A = torch.load(lowercase__ , map_location="cpu" ) return sd def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__=rename_keys_prefix ): """simple docstring""" A = OrderedDict() A = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A = key for name_pair in rename_keys_prefix: A = new_key.replace(name_pair[0] , name_pair[1] ) A = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A = new_d["cls.predictions.bias"] return new_d @torch.no_grad() def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ): """simple docstring""" assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: A = "pretraining" if "vcr" in checkpoint_path: A = {"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: A = {"visual_embedding_dim": 2_048} elif "vqa" in checkpoint_path: A = {"visual_embedding_dim": 2_048} elif "nlvr" in checkpoint_path: A = {"visual_embedding_dim": 1_024} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: A = {"visual_embedding_dim": 512} A = "multichoice" elif "vqa_advanced" in checkpoint_path: A = {"visual_embedding_dim": 2_048} A = "vqa_advanced" elif "vqa" in checkpoint_path: A = {"visual_embedding_dim": 2_048, "num_labels": 3_129} A = "vqa" elif "nlvr" in checkpoint_path: A = { "visual_embedding_dim": 1_024, "num_labels": 2, } A = "nlvr" A = VisualBertConfig(**lowercase__ ) # Load State Dict A = load_state_dict(lowercase__ ) A = get_new_dict(lowercase__ , lowercase__ ) if model_type == "pretraining": A = VisualBertForPreTraining(lowercase__ ) elif model_type == "vqa": A = VisualBertForQuestionAnswering(lowercase__ ) elif model_type == "nlvr": A = VisualBertForVisualReasoning(lowercase__ ) elif model_type == "multichoice": A = VisualBertForMultipleChoice(lowercase__ ) model.load_state_dict(lowercase__ ) # Save Checkpoints Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) model.save_pretrained(lowercase__ ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') __A : Any = 
parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
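# Expected invocation of the conversion script above (the script's file name
# is the assumed upstream one); the checkpoint must appear in
# ACCEPTABLE_CHECKPOINTS so the matching config and head are selected:
#
#     python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#         vqa_pre_trained.th ./visual_bert_vqa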
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class __lowerCAmelCase ( _a ): def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , **__magic_name__ , ) -> List[Any]: '''simple docstring''' super().__init__(features=__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase , **__lowercase ) snake_case_ : Tuple = Sql( cache_dir=__lowercase , features=__lowercase , sql=__lowercase , con=__lowercase , **__lowercase , ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[Any] = None snake_case_ : Optional[Any] = None snake_case_ : Union[str, Any] = None snake_case_ : int = None self.builder.download_and_prepare( download_config=__lowercase , download_mode=__lowercase , verification_mode=__lowercase , base_path=__lowercase , ) # Build dataset for splits snake_case_ : List[Any] = self.builder.as_dataset( split='''train''' , verification_mode=__lowercase , in_memory=self.keep_in_memory ) return dataset class __lowerCAmelCase : def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ) -> int: '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) snake_case_ : Optional[int] = dataset snake_case_ : int = name snake_case_ : Optional[int] = con snake_case_ : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE snake_case_ : List[Any] = num_proc snake_case_ : str = to_sql_kwargs def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = self.to_sql_kwargs.pop('''sql''' , __lowercase ) snake_case_ : str = self.to_sql_kwargs.pop('''con''' , __lowercase ) snake_case_ : Dict = self.to_sql_kwargs.pop('''index''' , __lowercase ) snake_case_ : List[str] = self._write(index=__lowercase , **self.to_sql_kwargs ) return written def lowerCamelCase (self , __magic_name__ ) -> Optional[int]: '''simple docstring''' snake_case_ , snake_case_ , snake_case_ : Optional[int] = args snake_case_ : Optional[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs snake_case_ : Tuple = query_table( table=self.dataset.data , key=slice(__lowercase , offset + self.batch_size ) , indices=self.dataset._indices , ) snake_case_ : List[Any] = batch.to_pandas() snake_case_ : int = df.to_sql(self.name , self.con , index=__lowercase , **__lowercase ) return num_rows or len(__lowercase ) def lowerCamelCase (self , __magic_name__ , **__magic_name__ ) -> int: '''simple docstring''' snake_case_ : List[Any] = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: snake_case_ , snake_case_ : int = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __lowercase , __lowercase )] , ) , total=(num_rows // batch_size) + 1 if 
num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
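# A usage sketch for the reader/writer pair above; in the public `datasets`
# API they back Dataset.from_sql and Dataset.to_sql (assumes sqlite3, and
# sqlalchemy for the URI-style connection string):
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
con = sqlite3.connect("example.db")
ds.to_sql("my_table", con)  # SqlDatasetWriter under the hood
loaded = Dataset.from_sql("my_table", "sqlite:///example.db")  # SqlDatasetReader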
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCAmelCase = { '''vocab_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-openqa''': ( '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-nq-reader''': ( '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-openqa''': ( '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json''' ), '''google/realm-orqa-wq-reader''': ( '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json''' ), }, } UpperCAmelCase = { '''google/realm-cc-news-pretrained-embedder''': 512, '''google/realm-cc-news-pretrained-encoder''': 512, '''google/realm-cc-news-pretrained-scorer''': 512, '''google/realm-cc-news-pretrained-openqa''': 512, '''google/realm-orqa-nq-openqa''': 512, '''google/realm-orqa-nq-reader''': 512, '''google/realm-orqa-wq-openqa''': 512, '''google/realm-orqa-wq-reader''': 512, } UpperCAmelCase = { '''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True}, '''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True}, '''google/realm-orqa-nq-reader''': {'''do_lower_case''': True}, '''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True}, 
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True}, } class lowerCAmelCase ( A ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = RealmTokenizer def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : int=None , __lowercase : List[Any]=True , __lowercase : Any="[UNK]" , __lowercase : Union[str, Any]="[SEP]" , __lowercase : Union[str, Any]="[PAD]" , __lowercase : Tuple="[CLS]" , __lowercase : List[Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : Union[str, Any]=None , **__lowercase : int , ): """simple docstring""" super().__init__( __lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , ) __lowercase =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , __lowercase ) != do_lower_case or normalizer_state.get('strip_accents' , __lowercase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , __lowercase ) != tokenize_chinese_chars ): __lowercase =getattr(__lowercase , normalizer_state.pop('type' ) ) __lowercase =do_lower_case __lowercase =strip_accents __lowercase =tokenize_chinese_chars __lowercase =normalizer_class(**__lowercase ) __lowercase =do_lower_case def snake_case ( self : List[str] , __lowercase : Optional[Any] , **__lowercase : Any ): """simple docstring""" __lowercase =PaddingStrategy.MAX_LENGTH __lowercase =text __lowercase =kwargs.pop('text_pair' , __lowercase ) __lowercase =kwargs.pop('return_tensors' , __lowercase ) __lowercase ={ 'input_ids': [], 'attention_mask': [], 'token_type_ids': [], } for idx, candidate_text in enumerate(__lowercase ): if batch_text_pair is not None: __lowercase =batch_text_pair[idx] else: __lowercase =None __lowercase =super().__call__(__lowercase , __lowercase , return_tensors=__lowercase , **__lowercase ) __lowercase =encoded_candidates.get('input_ids' ) __lowercase =encoded_candidates.get('attention_mask' ) __lowercase =encoded_candidates.get('token_type_ids' ) if encoded_input_ids is not None: output_data["input_ids"].append(__lowercase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(__lowercase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(__lowercase ) __lowercase ={key: item for key, item in output_data.items() if len(__lowercase ) != 0} return BatchEncoding(__lowercase , tensor_type=__lowercase ) def snake_case ( self : List[str] , __lowercase : Tuple , __lowercase : Optional[int]=None ): """simple docstring""" __lowercase =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case ( self : List[str] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): """simple docstring""" __lowercase =[self.sep_token_id] __lowercase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case ( self : Dict , __lowercase : str , __lowercase : Optional[str] = None ): """simple docstring""" __lowercase =self._tokenizer.model.save(__lowercase , name=__lowercase ) return tuple(__lowercase )
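# A sketch of the candidate-batching override above (named
# batch_encode_candidates upstream): each example carries several candidate
# texts, all padded to max_length so the batch stays rectangular (assumes
# transformers with torch installed).
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
candidates = [
    ["Hello world!", "Nice to meet you!"],
    ["The cube is a solid.", "It rains today."],
]
batch = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
print(batch.input_ids.shape)  # (num_examples, num_candidates, max_length)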
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
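# Expected CLI invocation for the `run` subcommand above; the flags mirror
# the add_argument calls in register_subcommand (a sketch with placeholder
# paths, not an exhaustive flag list):
#
#     transformers-cli run --task text-classification \
#         --input data.csv --format csv --column text \
#         --output out.csv --device -1 --overwrite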
"""MVP model configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int _lowercase: Optional[int] = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _lowercase ( datasets.BuilderConfig ): """simple docstring""" __A = None def a( A : Dict , A : Any , ) -> List[str]: """simple docstring""" import pyspark def generate_fn(): a = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) ) for partition_id in partition_order: a = df_with_partition_id.select("*" ).where(f'''part_id = {partition_id}''' ).drop("part_id" ) a = partition_df.collect() a = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class _lowercase ( _BaseExamplesIterable ): """simple docstring""" def __init__(self , lowerCamelCase_ , lowerCamelCase_=None , ): """simple docstring""" a = df a = partition_order or range(self.df.rdd.getNumPartitions() ) a = _generate_iterable_examples(self.df , self.partition_order ) def __iter__(self ): """simple docstring""" yield from self.generate_examples_fn() def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowerCamelCase_ ) return SparkExamplesIterable(self.df , partition_order=lowerCamelCase_ ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" a = self.split_shard_indices_by_worker(lowerCamelCase_ , lowerCamelCase_ ) return SparkExamplesIterable(self.df , partition_order=lowerCamelCase_ ) @property def UpperCamelCase_ (self ): """simple docstring""" return len(self.partition_order ) class _lowercase ( datasets.DatasetBuilder ): """simple docstring""" __A = SparkConfig def __init__(self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ): """simple docstring""" import pyspark a = pyspark.sql.SparkSession.builder.getOrCreate() a = df a = working_dir super().__init__( cache_dir=lowerCamelCase_ , config_name=str(self.df.semanticHash() ) , **lowerCamelCase_ , ) def UpperCamelCase_ (self ): """simple docstring""" def create_cache_and_write_probe(lowerCamelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowerCamelCase_ ) a = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowerCamelCase_ , "a" ) return [probe_file] if self._spark.conf.get("spark.master" , "" ).startswith("local" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: a = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowerCamelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def UpperCamelCase_ (self ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" import pyspark def get_arrow_batch_size(lowerCamelCase_ ): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} ) a = self.df.count() a = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. a = ( self.df.limit(lowerCamelCase_ ) .repartition(1 ) .mapInArrow(lowerCamelCase_ , "batch_bytes: long" ) .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) ) .collect()[0] .sample_bytes / sample_num_rows ) a = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. a = min(lowerCamelCase_ , int(approx_total_size / max_shard_size ) ) a = self.df.repartition(lowerCamelCase_ ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ): """simple docstring""" import pyspark a = ParquetWriter if file_format == """parquet""" else ArrowWriter a = os.path.join(self._working_dir , os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath a = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. a = self.config.features a = self._writer_batch_size a = self._fs.storage_options def write_arrow(lowerCamelCase_ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. a = pyspark.TaskContext().taskAttemptId() a = next(lowerCamelCase_ , lowerCamelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , ) a = 0 a = writer_class( features=lowerCamelCase_ , path=working_fpath.replace("SSSSS" , F'''{shard_id:05d}''' ).replace("TTTTT" , F'''{task_id:05d}''' ) , writer_batch_size=lowerCamelCase_ , storage_options=lowerCamelCase_ , embed_local_files=lowerCamelCase_ , ) a = pa.Table.from_batches([first_batch] ) writer.write_table(lowerCamelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: a = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) shard_id += 1 a = writer_class( features=writer._features , path=working_fpath.replace("SSSSS" , F'''{shard_id:05d}''' ).replace("TTTTT" , F'''{task_id:05d}''' ) , writer_batch_size=lowerCamelCase_ , storage_options=lowerCamelCase_ , embed_local_files=lowerCamelCase_ , ) a = pa.Table.from_batches([batch] ) writer.write_table(lowerCamelCase_ ) if writer._num_bytes > 0: a = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ): a = os.path.join(os.path.dirname(lowerCamelCase_ ) , os.path.basename(lowerCamelCase_ ) ) shutil.move(lowerCamelCase_ , lowerCamelCase_ ) a = ( self.df.mapInArrow(lowerCamelCase_ , "task_id: long, num_examples: long, num_bytes: long" ) .groupBy("task_id" ) .agg( pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = "arrow" , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ): """simple docstring""" self._validate_cache_dir() a = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowerCamelCase_ ) a = not is_remote_filesystem(self._fs ) a = os.path.join if is_local else posixpath.join a = """-TTTTT-SSSSS-of-NNNNN""" a = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' a = path_join(self._output_dir , lowerCamelCase_ ) a = 0 a = 0 a = 0 a = [] a = [] for task_id, content in self._prepare_split_single(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): ( a ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowerCamelCase_ ) a = total_num_examples a = total_num_bytes # should rename everything at the end logger.debug(F'''Renaming {total_shards} shards.''' ) if total_shards > 1: a = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
a = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ): rename( lowerCamelCase_ , fpath.replace("SSSSS" , F'''{shard_id:05d}''' ).replace("TTTTT" , F'''{task_id:05d}''' ) , fpath.replace("TTTTT-SSSSS" , F'''{global_shard_id:05d}''' ).replace("NNNNN" , F'''{total_shards:05d}''' ) , ) a = [] a = 0 for i in range(len(lowerCamelCase_ ) ): a = task_id_and_num_shards[i] for shard_id in range(lowerCamelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowerCamelCase_ , len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect() else: # don't use any pattern a = 0 a = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS" , F'''{shard_id:05d}''' ).replace("TTTTT" , F'''{task_id:05d}''' ) , fpath.replace(lowerCamelCase_ , "" ) , ) def UpperCamelCase_ (self , lowerCamelCase_ , ): """simple docstring""" return SparkExamplesIterable(self.df )
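# Illustrative usage of the Spark builder above (a sketch, not part of the
# original module). `Dataset.from_spark` is the public entry point referenced
# in the _validate_cache_dir error message; the DataFrame here is hypothetical.
#
#   import datasets
#   from pyspark.sql import SparkSession
#
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([(i, f"row {i}") for i in range(100)], "id: long, text: string")
#
#   # Materialises the DataFrame into an Arrow-backed Dataset, writing one or
#   # more shards per Spark partition (bounded by max_shard_size).
#   ds = datasets.Dataset.from_spark(df)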
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
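# A compact sketch of the DiffEdit workflow the slow tests above exercise
# (mask generation -> DDIM inversion -> masked generation). Checkpoint and
# prompts mirror the tests; `raw_image` is a user-supplied PIL image.
#
#   import torch
#   from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
#
#   pipe = StableDiffusionDiffEditPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
#   )
#   pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
#   pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
#   pipe.enable_model_cpu_offload()
#
#   mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
#   latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
#   image = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7).images[0]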
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
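# Hedged export sketch for the ONNX config above; the tiny config values are
# assumptions for illustration, and the dummy-input call mirrors
# generate_dummy_inputs as defined in this file.
#
#   from transformers import AutoTokenizer, TensorType
#
#   config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)   # tiny, for illustration
#   onnx_config = GPTJOnnxConfig(config, use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )
#   # dummy holds input_ids, zero-filled past_key_values, and the extended attention_mask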
"""Project Euler Problem 144: counting laser-beam reflections inside the
ellipse 4x^2 + y^2 = 100 with an exit gap at the top."""
from math import isclose, sqrt


def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # gradient of the normal at the point of incidence on the ellipse
    normal_gradient = point_y / 4 / point_x
    # sin(2*theta) and cos(2*theta) for the angle theta of the normal
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
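# Worked numbers for the default call: the beam enters at (0.0, 10.1) heading
# for (1.4, -9.6), so the initial gradient is (10.1 - (-9.6)) / (0.0 - 1.4)
# = 19.7 / -1.4, roughly -14.07. Each next_point call reflects the beam once
# off the ellipse, and solution() counts reflections until the beam exits
# through the gap |x| <= 0.01 with y > 0.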
"""simple docstring""" def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): _validate_point(UpperCamelCase_ ) _validate_point(UpperCamelCase_ ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(a - b ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ) ) ) def _lowerCAmelCase ( UpperCamelCase_ ): if point: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): for item in point: if not isinstance(UpperCamelCase_ , (int, float) ): __SCREAMING_SNAKE_CASE = ( """Expected a list of numbers as input, found """ f"{type(UpperCamelCase_ ).__name__}" ) raise TypeError(UpperCamelCase_ ) else: __SCREAMING_SNAKE_CASE = f"Expected a list of numbers as input, found {type(UpperCamelCase_ ).__name__}" raise TypeError(UpperCamelCase_ ) else: raise ValueError("""Missing an input""" ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): _validate_point(UpperCamelCase_ ) _validate_point(UpperCamelCase_ ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(x - y ) for x, y in zip(UpperCamelCase_ , UpperCamelCase_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations from fractions import Fraction def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> bool: return ( num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den ) def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list[str]: lowercase__: str = [] lowercase__: str = 1_1 lowercase__: str = int('''1''' + '''0''' * digit_len ) for num in range(__UpperCAmelCase , __UpperCAmelCase ): while den <= 9_9: if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0): if is_digit_cancelling(__UpperCAmelCase , __UpperCAmelCase ): solutions.append(F"""{num}/{den}""" ) den += 1 num += 1 lowercase__: Dict = 1_0 return solutions def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 2 ) -> int: lowercase__: List[str] = 1.0 for fraction in fraction_list(__UpperCAmelCase ): lowercase__: List[str] = Fraction(__UpperCAmelCase ) result *= frac.denominator / frac.numerator return int(__UpperCAmelCase ) if __name__ == "__main__": print(solution())
"""simple docstring""" import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class a ( a__ ): snake_case__ = '''MCTCTFeatureExtractor''' snake_case__ = '''AutoTokenizer''' def __init__( self , _snake_case , _snake_case ): """simple docstring""" super().__init__(_snake_case , _snake_case ) lowerCAmelCase = self.feature_extractor lowerCAmelCase = False def __call__( self , *_snake_case , **_snake_case ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*_snake_case , **_snake_case ) if "raw_speech" in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' ) lowerCAmelCase = kwargs.pop('raw_speech' ) else: lowerCAmelCase = kwargs.pop('audio' , _snake_case ) lowerCAmelCase = kwargs.pop('sampling_rate' , _snake_case ) lowerCAmelCase = kwargs.pop('text' , _snake_case ) if len(_snake_case ) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: lowerCAmelCase = self.feature_extractor(_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case ) if text is not None: lowerCAmelCase = self.tokenizer(_snake_case , **_snake_case ) if text is None: return inputs elif audio is None: return encodings else: lowerCAmelCase = encodings['input_ids'] return inputs def UpperCamelCase__ ( self , *_snake_case , **_snake_case ): """simple docstring""" return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def UpperCamelCase__ ( self , *_snake_case , **_snake_case ): """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*_snake_case , **_snake_case ) lowerCAmelCase = kwargs.pop('input_features' , _snake_case ) lowerCAmelCase = kwargs.pop('labels' , _snake_case ) if len(_snake_case ) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if input_features is not None: lowerCAmelCase = self.feature_extractor.pad(_snake_case , *_snake_case , **_snake_case ) if labels is not None: lowerCAmelCase = self.tokenizer.pad(_snake_case , **_snake_case ) if labels is None: return input_features elif input_features is None: return labels else: lowerCAmelCase = labels['input_ids'] return input_features def UpperCamelCase__ ( self , *_snake_case , **_snake_case ): """simple docstring""" return self.tokenizer.decode(*_snake_case , **_snake_case ) @contextmanager def UpperCamelCase__ ( self ): """simple docstring""" warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your audio inputs, or in a separate call.' ) lowerCAmelCase = True lowerCAmelCase = self.tokenizer yield lowerCAmelCase = self.feature_extractor lowerCAmelCase = False
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a : def __init__( self ): """simple docstring""" lowerCAmelCase = '' lowerCAmelCase = '' lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 2_56 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = cva.imread(_snake_case , 0 ) lowerCAmelCase = copy.deepcopy(self.img ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' ) lowerCAmelCase = np.sum(_snake_case ) for i in range(len(_snake_case ) ): lowerCAmelCase = x[i] / self.k self.sk += prk lowerCAmelCase = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase = int(last % last ) lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(_snake_case ) lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase = self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def UpperCamelCase__ ( self ): """simple docstring""" plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def UpperCamelCase__ ( self ): """simple docstring""" cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": __UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') __UpperCamelCase : List[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
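# Minimal usage sketch (illustrative): a small decoder config and a randomly
# initialised model built from it; the model class name follows the
# transformers library.
#
#   from transformers import TrOCRConfig, TrOCRForCausalLM
#
#   config = TrOCRConfig(d_model=256, decoder_layers=2, decoder_attention_heads=4, decoder_ffn_dim=1024)
#   model = TrOCRForCausalLM(config)                      # random weights, for illustration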
"""Project Euler Problem 2: sum of the even-valued Fibonacci terms not
exceeding a given limit."""


def solution(n: int = 4_000_000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
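# --- Illustrative check (added example) ---
# The even Fibonacci numbers not exceeding 100 are 2, 8 and 34, so
# solution(100) == 44; the default limit of 4_000_000 yields 4_613_732.
if __name__ == "__main__":
    assert solution(100) == 44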
from __future__ import annotations __UpperCamelCase : List[str] = tuple[int, int, int] __UpperCamelCase : Optional[Any] = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase __UpperCamelCase : Optional[Any] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # -------------------------- default selection -------------------------- # rotors -------------------------- __UpperCamelCase : int = "EGZWVONAHDCLFQMSIPJBYUKXTR" __UpperCamelCase : List[Any] = "FOBHMDKEXQNRAULPGSJVTYICZW" __UpperCamelCase : Dict = "ZJXESIUQLHAVRMDOYGTNFWPBKC" # reflector -------------------------- __UpperCamelCase : str = { "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I", "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L", "M": "Z", "Z": "M", } # -------------------------- extra rotors -------------------------- __UpperCamelCase : Optional[int] = "RMDJXFUWGISLHVTCQNKYPBEZOA" __UpperCamelCase : Dict = "SGLCPQWZHKXAREONTFBVIYJUDM" __UpperCamelCase : Optional[Any] = "HVSICLTYKQUBXDWAJZOMFGPREN" __UpperCamelCase : List[Any] = "RZWQHFMVDBKICJLNTUXAGYPSOE" __UpperCamelCase : Optional[Any] = "LFKIJODBEGAMQPXVUHYSTCZRWN" __UpperCamelCase : Optional[Any] = "KOAEGVDHXPQZMLFTYWJNBRCIUS" def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: # Checks if there are 3 unique rotors if (unique_rotsel := len(set(__lowerCamelCase ) )) < 3: a = f'Please use 3 unique rotors (not {unique_rotsel})' raise Exception(__lowerCamelCase ) # Checks if rotor positions are valid a , a , a = rotpos if not 0 < rotorposa <= len(__lowerCamelCase ): a = f'First rotor position is not within range of 1..26 ({rotorposa}' raise ValueError(__lowerCamelCase ) if not 0 < rotorposa <= len(__lowerCamelCase ): a = f'Second rotor position is not within range of 1..26 ({rotorposa})' raise ValueError(__lowerCamelCase ) if not 0 < rotorposa <= len(__lowerCamelCase ): a = f'Third rotor position is not within range of 1..26 ({rotorposa})' raise ValueError(__lowerCamelCase ) # Validates string and returns dict a = _plugboard(__lowerCamelCase ) return rotpos, rotsel, pbdict def __A ( __lowerCamelCase ) -> dict[str, str]: # tests the input string if it # a) is type string # b) has even length (so pairs can be made) if not isinstance(__lowerCamelCase , __lowerCamelCase ): a = f'Plugboard setting isn\'t type string ({type(__lowerCamelCase )})' raise TypeError(__lowerCamelCase ) elif len(__lowerCamelCase ) % 2 != 0: a = f'Odd number of symbols ({len(__lowerCamelCase )})' raise Exception(__lowerCamelCase ) elif pbstring == "": return {} pbstring.replace(""" """ , """""" ) # Checks if all characters are unique a = set() for i in pbstring: if i not in abc: a = f'\'{i}\' not in list of symbols' raise Exception(__lowerCamelCase ) elif i in tmppbl: a = f'Duplicate symbol ({i})' raise Exception(__lowerCamelCase ) else: tmppbl.add(__lowerCamelCase ) del tmppbl # Created the dictionary a = {} for j in range(0 , len(__lowerCamelCase ) - 1 , 2 ): a = pbstring[j + 1] a = pbstring[j] return pb def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = (rotora, rotora, rotora) , __lowerCamelCase = "" , ) -> str: a = text.upper() a , a , a = _validator( __lowerCamelCase , __lowerCamelCase , plugb.upper() ) a , a , a = rotor_position a , a , a = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 a = [] # encryption/decryption process 
-------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: a = plugboard[symbol] # rotor ra -------------------------- a = abc.index(__lowerCamelCase ) + rotorposa a = rotora[index % len(__lowerCamelCase )] # rotor rb -------------------------- a = abc.index(__lowerCamelCase ) + rotorposa a = rotora[index % len(__lowerCamelCase )] # rotor rc -------------------------- a = abc.index(__lowerCamelCase ) + rotorposa a = rotora[index % len(__lowerCamelCase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher a = reflector[symbol] # 2nd rotors a = abc[rotora.index(__lowerCamelCase ) - rotorposa] a = abc[rotora.index(__lowerCamelCase ) - rotorposa] a = abc[rotora.index(__lowerCamelCase ) - rotorposa] # 2nd plugboard if symbol in plugboard: a = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(__lowerCamelCase ): a = 0 rotorposa += 1 if rotorposa >= len(__lowerCamelCase ): a = 0 rotorposa += 1 if rotorposa >= len(__lowerCamelCase ): a = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) if __name__ == "__main__": __UpperCamelCase : List[Any] = "This is my Python script that emulates the Enigma machine from WWII." __UpperCamelCase : Union[str, Any] = (1, 1, 1) __UpperCamelCase : List[Any] = "pictures" __UpperCamelCase : Tuple = (rotora, rotora, rotora) __UpperCamelCase : str = enigma(message, rotor_pos, rotor_sel, pb) print("Encrypted message:", en) print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
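# The machine is its own inverse for a fixed initial state: feeding the cipher
# text back through enigma() with the same rotor positions, rotor selection and
# plugboard recovers the plaintext (uppercased). A sketch, using the call form
# from the demo block above:
#
#   ct = enigma("HELLOWORLD", (1, 1, 1), plugb="pictures")
#   assert enigma(ct, (1, 1, 1), plugb="pictures") == "HELLOWORLD"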
import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __magic_name__ ): UpperCamelCase__ = (IPNDMScheduler,) UpperCamelCase__ = (('''num_inference_steps''', 50),) def lowerCamelCase__ ( self :Any , **__magic_name__ :Optional[Any] ): '''simple docstring''' a = {"""num_train_timesteps""": 1000} config.update(**__magic_name__ ) return config def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Tuple=0 , **__magic_name__ :Optional[int] ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :List[Any] , __magic_name__ :List[Any]=0 , **__magic_name__ :Any ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[:] if time_step is None: a = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__magic_name__ ) a = scheduler_class.from_pretrained(__magic_name__ ) # copy over dummy past residuals new_scheduler.set_timesteps(__magic_name__ ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[:] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = new_scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" def lowerCamelCase__ ( self :Optional[Any] , **__magic_name__ :Optional[int] ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config(**__magic_name__ ) a = scheduler_class(**__magic_name__ ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): a = model(__magic_name__ , __magic_name__ ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample return sample def lowerCamelCase__ ( self :str ): '''simple docstring''' a = dict(self.forward_default_kwargs ) a = kwargs.pop("""num_inference_steps""" , __magic_name__ ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__magic_name__ ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ): scheduler.set_timesteps(__magic_name__ ) elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a = dummy_past_residuals[:] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample a = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Dict ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__magic_name__ , time_step=__magic_name__ ) def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = self.full_loop() a = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_mean.item() - 254_0529 ) < 10
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """Length of the circular arc subtending `angle` degrees on a circle of
    the given `radius`."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
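# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi,
# roughly 15.708.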
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'spiece.model'} _snake_case = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class UpperCamelCase ( snake_case_ ): def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None: _a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token _a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) _a : Optional[Any] = 3 _a : Tuple = do_lower_case _a : Tuple = remove_space _a : Tuple = keep_accents _a : Tuple = vocab_file _a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. 
""" """See https://pypi.org/project/jieba/ for installation.""" ) _a : int = jieba _a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _lowercase ( self : Optional[Any] ) -> Any: return len(self.sp_model ) def _lowercase ( self : str ) -> Union[str, Any]: _a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> List[str]: _a : Tuple = self.__dict__.copy() _a : Tuple = None return state def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict: _a : Tuple = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple = {} _a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Dict: if self.remove_space: _a : Optional[int] = """ """.join(inputs.strip().split() ) else: _a : List[Any] = inputs _a : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _a : Optional[Any] = unicodedata.normalize("""NFKD""" , UpperCAmelCase__ ) _a : Dict = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] ) if self.do_lower_case: _a : Union[str, Any] = outputs.lower() return outputs def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]: _a : str = self.preprocess_text(UpperCAmelCase__ ) _a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) _a : Union[str, Any] = [] for piece in pieces: if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _a : Dict = cur_pieces[1:] else: _a : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCAmelCase__ ) else: new_pieces.append(UpperCAmelCase__ ) return new_pieces def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int: return self.sp_model.PieceToId(UpperCAmelCase__ ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any: return self.sp_model.IdToPiece(UpperCAmelCase__ ) def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict: _a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip() return out_string def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Optional[Any] = [self.sep_token_id] _a : Dict = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] return ([0] * len(UpperCAmelCase__ )) + [1, 1] def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Any = [self.sep_token_id] _a : 
Optional[Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _a : Union[str, Any] = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , """wb""" ) as fi: _a : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,) def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]: _a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) _a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
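# Hedged usage sketch for the tokenizer above (CpmTokenizer in transformers);
# the checkpoint matches the vocab map in the file, and jieba must be
# installed for the CPM-style pre-tokenization.
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer("你好,世界")["input_ids"]
#   text = tokenizer.decode(ids)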
_A = "Alexander Joslin" import operator as op from .stack import Stack def lowercase_ ( A__ ) -> int: """simple docstring""" snake_case = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub} snake_case = Stack() snake_case = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(A__ ) ) elif i in operators: # RULE 2 operator_stack.push(A__ ) elif i == ")": # RULE 4 snake_case = operator_stack.peek() operator_stack.pop() snake_case = operand_stack.peek() operand_stack.pop() snake_case = operand_stack.peek() operand_stack.pop() snake_case = operators[opr](A__ , A__ ) operand_stack.push(A__ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": _A = "(5 + ((4 * 2) * (2 + 3)))" # answer = 45 print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
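# The helper pair under test, in one line each (illustrative):
#
#   feats, idxs = get_aligned_output_features_output_indices(None, None, ["stem", "s1", "s2"])
#   # -> (["s2"], [2]): with both arguments None, the last stage is selected.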
"""simple docstring""" from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class lowerCamelCase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ): """simple docstring""" snake_case : str = parent snake_case : Optional[int] = 13 snake_case : Optional[Any] = 7 snake_case : Optional[int] = True snake_case : str = True snake_case : List[str] = True snake_case : Optional[Any] = True snake_case : Dict = 99 snake_case : str = 32 snake_case : List[Any] = 2 snake_case : Any = 4 snake_case : str = 37 snake_case : Dict = "gelu" snake_case : Optional[Any] = 0.1 snake_case : List[str] = 0.1 snake_case : List[str] = 512 snake_case : str = 16 snake_case : Union[str, Any] = 2 snake_case : Union[str, Any] = 0.02 snake_case : Any = 3 snake_case : Dict = 4 snake_case : List[Any] = None def lowerCamelCase_ ( self ): """simple docstring""" snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case : Union[str, Any] = None if self.use_input_mask: snake_case : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) snake_case : Optional[int] = None if self.use_token_type_ids: snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case : List[str] = None snake_case : List[Any] = None snake_case : Dict = None if self.use_labels: snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case : Any = ids_tensor([self.batch_size] , self.num_choices ) snake_case : List[str] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=SCREAMING_SNAKE_CASE , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" snake_case : Dict = TFRoFormerModel(config=SCREAMING_SNAKE_CASE ) snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} snake_case : str = [input_ids, input_mask] snake_case : Any = model(SCREAMING_SNAKE_CASE ) snake_case : Tuple = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" snake_case : List[str] = True snake_case : Tuple = TFRoFormerForCausalLM(config=SCREAMING_SNAKE_CASE ) snake_case : Any = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } snake_case : List[str] = model(SCREAMING_SNAKE_CASE )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" snake_case : List[str] = TFRoFormerForMaskedLM(config=SCREAMING_SNAKE_CASE ) snake_case : List[str] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } snake_case : Tuple = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" snake_case : Any = self.num_labels snake_case : Dict = TFRoFormerForSequenceClassification(config=SCREAMING_SNAKE_CASE ) snake_case : str = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" snake_case : Tuple = self.num_choices snake_case : List[str] = TFRoFormerForMultipleChoice(config=SCREAMING_SNAKE_CASE ) snake_case : int = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) snake_case : str = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) snake_case : int = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) snake_case : List[Any] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" snake_case : int = self.num_labels snake_case : Any = 
TFRoFormerForTokenClassification(config=SCREAMING_SNAKE_CASE ) snake_case : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } snake_case : List[Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" snake_case : Optional[int] = TFRoFormerForQuestionAnswering(config=SCREAMING_SNAKE_CASE ) snake_case : int = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } snake_case : Dict = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = self.prepare_config_and_inputs() ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ) : Any = config_and_inputs snake_case : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): a__ : List[Any] = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) a__ : Union[str, Any] = ( { """feature-extraction""": TFRoFormerModel, """fill-mask""": TFRoFormerForMaskedLM, """question-answering""": TFRoFormerForQuestionAnswering, """text-classification""": TFRoFormerForSequenceClassification, """text-generation""": TFRoFormerForCausalLM, """token-classification""": TFRoFormerForTokenClassification, """zero-shot""": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) a__ : str = False a__ : Dict = False def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCamelCase_ ( self ): """simple docstring""" snake_case : List[Any] = TFRoFormerModelTester(self ) snake_case : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def lowerCamelCase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple 
docstring""" snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE ) @slow def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Dict = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_tf class lowerCamelCase__ ( unittest.TestCase ): @slow def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[Any] = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) snake_case : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] ) snake_case : Dict = model(SCREAMING_SNAKE_CASE )[0] # TODO Replace vocab size snake_case : Dict = 50_000 snake_case : Optional[int] = [1, 6, vocab_size] self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. snake_case : List[str] = tf.constant( [ [ [-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46], [-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07], [-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) @require_tf class lowerCamelCase__ ( unittest.TestCase ): a__ : Optional[int] = 1e-4 def lowerCamelCase_ ( self ): """simple docstring""" snake_case : str = tf.constant([[4, 10]] ) snake_case : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) snake_case : Dict = emba(input_ids.shape ) snake_case : int = tf.constant( [[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] ) tf.debugging.assert_near(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=self.tolerance ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : int = tf.constant( [ [0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00], [0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17], [0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70], ] ) snake_case : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) snake_case : Dict = emba.weight[:3, :5] tf.debugging.assert_near(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=self.tolerance ) @require_tf class lowerCamelCase__ ( unittest.TestCase ): a__ : List[str] = 1e-4 def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Dict = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 snake_case : str = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 snake_case : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) snake_case : Any = embed_positions([2, 16, 768] )[None, None, :, :] snake_case , snake_case : Optional[Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) snake_case : Union[str, Any] = tf.constant( [ [0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00], [-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43], 
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85], [-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71], [0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80], [3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53], ] ) snake_case : List[str] = tf.constant( [ [0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00], [0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43], [1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85], [2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71], [-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80], [-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , SCREAMING_SNAKE_CASE , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , SCREAMING_SNAKE_CASE , atol=self.tolerance )
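# Illustrative sketch (not part of the test file above): a minimal NumPy
# reproduction of the sinusoidal table those tests pin down. Row p holds
# [sin(p*f_0), ..., sin(p*f_{d/2-1}), cos(p*f_0), ..., cos(p*f_{d/2-1})].
import numpy as np


def sinusoidal_table(num_positions: int, dim: int) -> np.ndarray:
    positions = np.arange(num_positions, dtype=np.float64)[:, None]   # (P, 1)
    inv_freq = 1.0 / (10_000 ** (2 * np.arange(dim // 2) / dim))      # (d/2,)
    angles = positions * inv_freq                                     # (P, d/2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)  # (P, d)


if __name__ == "__main__":
    print(np.round(sinusoidal_table(6, 6)[:2], 4))
    # [[0.     0.     0.     1.     1.     1.    ]
    #  [0.8415 0.0464 0.0022 0.5403 0.9989 1.    ]]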
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( lowerCamelCase_ , unittest.TestCase ): a__ : Union[str, Any] = XLMRobertaTokenizer a__ : Optional[int] = XLMRobertaTokenizerFast a__ : List[str] = True a__ : List[Any] = True def lowerCamelCase_ ( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case : Any = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : List[Any] = "<pad>" snake_case : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1_002 ) def lowerCamelCase_ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_002 ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : str = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE ) snake_case : Optional[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) snake_case : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) snake_case : Optional[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) self.assertListEqual( SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ) self.assertListEqual( SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def lowerCamelCase_ ( self ): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return snake_case : List[str] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) snake_case : Tuple = tempfile.mkdtemp() snake_case : Tuple = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE ) snake_case : List[str] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) snake_case : Dict = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way snake_case : int = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE ) snake_case : Tuple = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=True snake_case : Tuple = tempfile.mkdtemp() snake_case : str = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE ) snake_case : str = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE ) # Checks it save with the same files self.assertSequenceEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Checks everything loads correctly in the same way snake_case : Dict = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE ) snake_case : Dict = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) 
shutil.rmtree(SCREAMING_SNAKE_CASE ) # Save tokenizer rust, legacy_format=False snake_case : List[str] = tempfile.mkdtemp() snake_case : List[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE ) snake_case : Union[str, Any] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case : Optional[Any] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE ) snake_case : Union[str, Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) shutil.rmtree(SCREAMING_SNAKE_CASE ) @cached_property def lowerCamelCase_ ( self ): """simple docstring""" return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" ) def lowerCamelCase_ ( self ): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(SCREAMING_SNAKE_CASE , f.name ) snake_case : int = XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE ) snake_case : Tuple = pickle.dumps(SCREAMING_SNAKE_CASE ) pickle.loads(SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return snake_case : Union[str, Any] = self.get_tokenizer() snake_case : Dict = self.get_rust_tokenizer() snake_case : Optional[Any] = "I was born in 92000, and this is falsé." snake_case : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE ) snake_case : Union[str, Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) snake_case : str = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) snake_case : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) snake_case : Union[str, Any] = self.get_rust_tokenizer() snake_case : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE ) snake_case : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def lowerCamelCase_ ( self ): """simple docstring""" snake_case : int = "Hello World!" snake_case : Optional[Any] = [0, 35_378, 6_661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE ) ) @slow def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Dict = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) snake_case : Dict = [ 0, 3_293, 83, 10, 4_552, 4_989, 7_986, 678, 10, 5_915, 111, 179_459, 124_850, 4, 6_044, 237, 12, 6, 5, 6, 4, 6_780, 705, 15, 1_388, 44, 378, 10_114, 711, 152, 20, 6, 5, 22_376, 642, 1_221, 15_190, 34_153, 450, 5_608, 959, 1_119, 57_702, 136, 186, 47, 1_098, 29_367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_044, 237, 6_284, 50_901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE ) ) @slow def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Any = {"input_ids": [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE , 
model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin _UpperCAmelCase : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _UpperCAmelCase : Tuple = 25_6047 _UpperCAmelCase : Optional[Any] = 25_6145 @require_sentencepiece @require_tokenizers class lowercase ( __snake_case , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = NllbTokenizer __SCREAMING_SNAKE_CASE : Any = NllbTokenizerFast __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : Tuple = True __SCREAMING_SNAKE_CASE : List[Any] = {} def a ( self ): super().setUp() # We have a SentencePiece fixture for testing snake_case_ = NllbTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self ): snake_case_ = NllbTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) snake_case_ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowerCamelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) snake_case_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) snake_case_ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def a ( self ): snake_case_ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) snake_case_ = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(lowerCamelCase_ ) snake_case_ = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) snake_case_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way snake_case_ = 
tokenizer_r.from_pretrained(lowerCamelCase_ ) snake_case_ = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=True snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) snake_case_ = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(lowerCamelCase_ ) snake_case_ = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=False snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) snake_case_ = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(lowerCamelCase_ ) snake_case_ = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) @require_torch def a ( self ): if not self.test_seqaseq: return snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
snake_case_ = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] snake_case_ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: snake_case_ = tokenizer.prepare_seqaseq_batch( src_texts=lowerCamelCase_ , tgt_texts=lowerCamelCase_ , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified snake_case_ = tokenizer.prepare_seqaseq_batch( lowerCamelCase_ , tgt_texts=lowerCamelCase_ , max_length=3 , return_tensors='pt' ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) snake_case_ = tokenizer.prepare_seqaseq_batch( src_texts=lowerCamelCase_ , max_length=3 , max_target_length=10 , return_tensors='pt' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn('decoder_input_ids' , lowerCamelCase_ ) @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' ) def a ( self ): pass def a ( self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = [AddedToken('<special>' , lstrip=lowerCamelCase_ )] snake_case_ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ ) snake_case_ = tokenizer_r.encode('Hey this is a <special> token' ) snake_case_ = tokenizer_r.encode('<special>' , add_special_tokens=lowerCamelCase_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: snake_case_ = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) snake_case_ = self.tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ ) snake_case_ = tokenizer_p.encode('Hey this is a <special> token' ) snake_case_ = tokenizer_cr.encode('Hey this is a <special> token' ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): __SCREAMING_SNAKE_CASE : Tuple = "facebook/nllb-200-distilled-600M" __SCREAMING_SNAKE_CASE : Tuple = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __SCREAMING_SNAKE_CASE : 
List[Any] = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __SCREAMING_SNAKE_CASE : List[str] = [ 256_047, 16_297, 134_408, 8_165, 248_066, 14_734, 950, 1_135, 105_721, 3_573, 83, 27_352, 108, 49_486, 2, ] @classmethod def a ( cls ): snake_case_ = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' ) snake_case_ = 1 return cls def a ( self ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_6001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_6002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_6057 ) def a ( self ): snake_case_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) def a ( self ): self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids ) # fmt: off snake_case_ = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047] # fmt: on snake_case_ = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) snake_case_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ ) def a ( self ): snake_case_ = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , lowerCamelCase_ ) snake_case_ = 10 snake_case_ = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , lowerCamelCase_ ) self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ ) def a ( self ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_6203, 3] ) def a ( self ): snake_case_ = tempfile.mkdtemp() snake_case_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase_ ) snake_case_ = NllbTokenizer.from_pretrained(lowerCamelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ ) @require_torch def a ( self ): snake_case_ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) snake_case_ = shift_tokens_right( batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) snake_case_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def a ( self ): snake_case_ = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='pt' ) snake_case_ = self.tokenizer( text_target=self.tgt_text , padding=lowerCamelCase_ , 
truncation=lowerCamelCase_ , max_length=10 , return_tensors='pt' ) snake_case_ = targets["""input_ids"""] snake_case_ = shift_tokens_right( lowerCamelCase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a ( self ): snake_case_ = self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(lowerCamelCase_ ) , { # A, test, EOS, en_XX 'input_ids': [[25_6047, 70, 7356, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_6057, } , ) @require_torch def a ( self ): snake_case_ = True snake_case_ = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] ) snake_case_ = False snake_case_ = self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
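# Usage sketch (assumes hub access; mirrors the assertions above).
from transformers import NllbTokenizer

if __name__ == "__main__":
    tok = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
    )
    enc = tok("UN Chief says there is no military solution in Syria")
    # With legacy_behaviour left at the default, the source language code
    # (eng_Latn = 256047) is prepended and </s> (id 2) closes the sequence.
    print(enc["input_ids"][0], enc["input_ids"][-1])  # 256047 2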
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import (
        BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlipConfig,
        BlipTextConfig,
        BlipVisionConfig,
    )
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
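# Illustrative sketch, independent of transformers' _LazyModule helper: the
# same deferred-import idea in its simplest form, via a PEP 562 module-level
# __getattr__. A submodule is only imported when one of its names is first used.
import importlib

_lazy_exports = {"math": ["sqrt"], "json": ["dumps"]}


def __getattr__(name):
    for module_name, exported in _lazy_exports.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")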
class PrefixSum:
    """Answer range-sum queries in O(1) after O(n) preprocessing."""

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end], both endpoints inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
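# Usage sketch:
if __name__ == "__main__":
    arr = PrefixSum([1, 2, 3, 4])
    print(arr.get_sum(1, 3))     # 2 + 3 + 4 = 9
    print(arr.contains_sum(7))   # True: the subarray [3, 4]
    print(arr.contains_sum(11))  # False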
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling points uniformly in the square [-1, 1] x [-1, 1]."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area of the circle to the square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of math.pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of a function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
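# Usage sketch: all three estimates tighten as the iteration count grows.
if __name__ == "__main__":
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)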
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import deque class lowerCAmelCase : def __init__( self : str , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : int ) -> None: lowerCamelCase__ : Optional[int] = process_name # process name lowerCamelCase__ : Optional[int] = arrival_time # arrival time of the process # completion time of finished process or last interrupted time lowerCamelCase__ : str = arrival_time lowerCamelCase__ : List[Any] = burst_time # remaining burst time lowerCamelCase__ : Any = 0 # total time of the process wait in ready queue lowerCamelCase__ : Tuple = 0 # time from arrival time to completion time class lowerCAmelCase : def __init__( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : list[int] , UpperCAmelCase : deque[Process] , UpperCAmelCase : int , ) -> None: # total number of mlfq's queues lowerCamelCase__ : Optional[int] = number_of_queues # time slice of queues that round robin algorithm applied lowerCamelCase__ : List[str] = time_slices # unfinished process is in this ready_queue lowerCamelCase__ : List[str] = queue # current time lowerCamelCase__ : Optional[Any] = current_time # finished process is in this sequence queue lowerCamelCase__ : deque[Process] = deque() def A_ ( self : Tuple ) -> list[str]: lowerCamelCase__ : Union[str, Any] = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def A_ ( self : Tuple , UpperCAmelCase : list[Process] ) -> list[int]: lowerCamelCase__ : Tuple = [] for i in range(len(UpperCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def A_ ( self : Union[str, Any] , UpperCAmelCase : list[Process] ) -> list[int]: lowerCamelCase__ : int = [] for i in range(len(UpperCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def A_ ( self : Optional[int] , UpperCAmelCase : list[Process] ) -> list[int]: lowerCamelCase__ : Tuple = [] for i in range(len(UpperCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def A_ ( self : str , UpperCAmelCase : deque[Process] ) -> list[int]: return [q.burst_time for q in queue] def A_ ( self : int , UpperCAmelCase : Process ) -> int: process.waiting_time += self.current_time - process.stop_time return process.waiting_time def A_ ( self : Optional[int] , UpperCAmelCase : deque[Process] ) -> deque[Process]: lowerCamelCase__ : deque[Process] = deque() # sequence deque of finished process while len(UpperCAmelCase ) != 0: lowerCamelCase__ : List[Any] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(UpperCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 lowerCamelCase__ : Optional[int] = 0 # set the process's turnaround time because it is finished lowerCamelCase__ : Union[str, Any] = self.current_time - cp.arrival_time # set the completion time lowerCamelCase__ : Any = self.current_time # add the process to queue that has finished queue finished.append(UpperCAmelCase ) self.finish_queue.extend(UpperCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def A_ ( self : str , UpperCAmelCase : deque[Process] , UpperCAmelCase : int ) -> tuple[deque[Process], deque[Process]]: lowerCamelCase__ : deque[Process] = deque() # sequence 
deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(UpperCAmelCase ) ): lowerCamelCase__ : Dict = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(UpperCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time lowerCamelCase__ : List[str] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(UpperCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished lowerCamelCase__ : Any = 0 # set the finish time lowerCamelCase__ : int = self.current_time # update the process' turnaround time because it is finished lowerCamelCase__ : Dict = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(UpperCAmelCase ) self.finish_queue.extend(UpperCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def A_ ( self : Dict ) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): lowerCamelCase__ , lowerCamelCase__ : Any = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest _UpperCAmelCase : List[str] = Process("""P1""", 0, 53) _UpperCAmelCase : Union[str, Any] = Process("""P2""", 0, 17) _UpperCAmelCase : int = Process("""P3""", 0, 68) _UpperCAmelCase : str = Process("""P4""", 0, 24) _UpperCAmelCase : Optional[int] = 3 _UpperCAmelCase : Optional[Any] = [17, 25] _UpperCAmelCase : Optional[int] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])}) _UpperCAmelCase : Tuple = Process("""P1""", 0, 53) _UpperCAmelCase : Any = Process("""P2""", 0, 17) _UpperCAmelCase : Any = Process("""P3""", 0, 68) _UpperCAmelCase : List[Any] = Process("""P4""", 0, 24) _UpperCAmelCase : List[str] = 3 _UpperCAmelCase : Optional[int] = [17, 25] _UpperCAmelCase : Optional[int] = deque([Pa, Pa, Pa, Pa]) _UpperCAmelCase : Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0) _UpperCAmelCase : Dict = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( F"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( F"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( F"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
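# Illustrative sketch (simplified, stand-alone; not the MLFQ class above).
# Each upper MLFQ queue applies plain round-robin with its time slice; this
# shows just that step on the same burst times as the demo.
from collections import deque


def round_robin_finish_order(burst_times: dict[str, int], time_slice: int) -> list[str]:
    ready = deque(burst_times.items())
    finished = []
    while ready:
        name, remaining = ready.popleft()
        if remaining > time_slice:
            ready.append((name, remaining - time_slice))  # not done yet: requeue
        else:
            finished.append(name)  # completes within this slice
    return finished


if __name__ == "__main__":
    print(round_robin_finish_order({"P1": 53, "P2": 17, "P3": 68, "P4": 24}, 17))
    # ['P2', 'P4', 'P1', 'P3']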
"""simple docstring""" def lowercase_ ( _snake_case ): if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(_snake_case ,_snake_case ): raise TypeError("""Input value must be a 'int' type""" ) return bin(_snake_case ).count("""1""" ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase ( lowercase ): UpperCAmelCase : Optional[Any] = ["""image_processor""", """tokenizer"""] UpperCAmelCase : Dict = """CLIPImageProcessor""" UpperCAmelCase : Dict = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""") def __init__(self : Union[str, Any] , _A : Dict=None , _A : Tuple=None , **_A : Optional[int]) -> Optional[int]: __snake_case : str = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _A , ) __snake_case : List[Any] = kwargs.pop('feature_extractor') __snake_case : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(_A , _A) def __call__(self : Dict , _A : Tuple=None , _A : Optional[int]=None , _A : Tuple=None , **_A : Any) -> int: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.') if text is not None: __snake_case : List[str] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: __snake_case : Any = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: __snake_case : Any = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowercase (self : List[Any] , *_A : Dict , **_A : int) -> int: return self.tokenizer.batch_decode(*_A , **_A) def _lowercase (self : List[Any] , *_A : Union[str, Any] , **_A : Tuple) -> Optional[Any]: return self.tokenizer.decode(*_A , **_A) @property def _lowercase (self : Union[str, Any]) -> List[Any]: __snake_case : Dict = self.tokenizer.model_input_names __snake_case : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    """True if naively cancelling the shared digit leaves the value unchanged,
    e.g. 49/98 -> 4/8."""
    return (
        num != den
        and num % 10 == den // 10
        and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """All non-trivial digit-cancelling fractions num/den with num below
    10**digit_len and a two-digit denominator."""
    solutions = []
    upper_bound = int("1" + "0" * digit_len)
    for num in range(11, upper_bound):
        for den in range(10, 100):
            # den % 10 != 0 rules out the trivial cases that just cancel a
            # trailing zero.
            if num != den and num % 10 == den // 10 and den % 10 != 0:
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
    return solutions


def solution(digit_len: int = 2) -> int:
    """Project Euler 33: denominator of the product of the curious fractions,
    given in lowest common terms."""
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
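# Usage sketch. The four "curious" two-digit fractions, e.g. 49/98 == 4/8 after
# cancelling the shared 9; their product reduces to 1/100, hence solution() == 100.
if __name__ == "__main__":
    print(fraction_list(2))  # ['16/64', '19/95', '26/65', '49/98']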
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
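# Illustrative check (requires transformers at runtime): the deprecation shim
# should emit a FutureWarning on construction.
if __name__ == "__main__":
    import warnings as _w

    with _w.catch_warnings(record=True) as caught:
        _w.simplefilter("always")
        PoolFormerFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)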