Dataset Viewer
Auto-converted to Parquet
text (string, lengths 15 to 7.82k)
ids (sequence, lengths 1 to 7)
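Each row below pairs a "text" cell (a Python snippet in which the function being named appears as METHOD_NAME) with an "ids" cell (a short list of integers). A minimal sketch of loading these two columns with the Hugging Face datasets library follows; the repo id and split name are assumptions, and only the column names and value shapes come from the preview itself.

from datasets import load_dataset

# Sketch only: the repo id "Mlxa/bert-dataset" and the "train" split are assumptions;
# the "text" and "ids" column names match the preview above.
ds = load_dataset("Mlxa/bert-dataset", split="train")

row = ds[0]
print(row["text"][:120])  # Python snippet containing the METHOD_NAME placeholder
print(row["ids"])         # list of 1 to 7 integers paired with the snippet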
def METHOD_NAME(self, session): pass
[ 69, 2333 ]
def METHOD_NAME(g): g.cmd(b's', b'T05thread:01;')
[ 8149, 367 ]
def METHOD_NAME(self): self.check('/admin/default/shell') ws_url = server.base_url.replace('http://', 'ws://') + '/admin/default/webshell-data' ws = create_connection(ws_url) # Python expressions are computed ws.send('1 + 2') eq_(ws.recv(), '3') # Session state is maintained. Gramex can be imported ws.send('import gramex') eq_(ws.recv(), '') ws.send('gramex.__version__') eq_(ast.literal_eval(ws.recv()), gramex.__version__) # handler is available for use ws.send('handler.session') result = ast.literal_eval(ws.recv()) ok_('_t' in result and 'id' in result)
[ 9, 2770 ]
def METHOD_NAME(self, x): self.__buf.write(struct.pack('>L', x))
[ 1699, 11068 ]
def METHOD_NAME(self): action = ChatJoinRequestHandler(self.callback) for attr in action.__slots__: assert getattr(action, attr, "err") != "err", f"got extra slot '{attr}'" assert len(mro_slots(action)) == len(set(mro_slots(action))), "duplicate slot"
[ 9, 3572, 3573 ]
def METHOD_NAME(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(path: Optional[Path] = None) -> Path: if path is None: path = Path.cwd() here = path while here.parent != here: config = here / ".neuro.toml" if config.exists(): return here here = here.parent raise ConfigError(f"Project root is not found for {path}")
[ 416, 155, 1563 ]
def METHOD_NAME():
    parser = argparse.ArgumentParser(
        description=USAGE,
        prog="ddtrace-run",
        usage="ddtrace-run <your usual python command>",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument("command", nargs=argparse.REMAINDER, type=str, help="Command string to execute.")
    parser.add_argument("-d", "--debug", help="enable debug mode (disabled by default)", action="store_true")
    parser.add_argument(
        "-i",
        "--info",
        help=(
            "print library info useful for debugging. Only reflects configurations made via environment "
            "variables, not those made in code."
        ),
        action="store_true",
    )
    parser.add_argument("-p", "--profiling", help="enable profiling (disabled by default)", action="store_true")
    parser.add_argument("-v", "--version", action="version", version="%(prog)s " + ddtrace.__version__)
    parser.add_argument("-nc", "--colorless", help="print output of command without color", action="store_true")
    args = parser.parse_args()
    if args.profiling:
        os.environ["DD_PROFILING_ENABLED"] = "true"
    if args.debug or ddtrace.config._debug_mode:
        logging.basicConfig(level=logging.DEBUG)
        os.environ["DD_TRACE_DEBUG"] = "true"
    if args.info:
        # Inline imports for performance.
        from ddtrace.internal.debug import pretty_collect
        print(pretty_collect(ddtrace.tracer, color=not args.colorless))
        sys.exit(0)
    root_dir = os.path.dirname(ddtrace.__file__)
    log.debug("ddtrace root: %s", root_dir)
    bootstrap_dir = os.path.join(root_dir, "bootstrap")
    log.debug("ddtrace bootstrap: %s", bootstrap_dir)
    _add_bootstrap_to_pythonpath(bootstrap_dir)
    log.debug("PYTHONPATH: %s", os.environ["PYTHONPATH"])
    log.debug("sys.path: %s", sys.path)
    if not args.command:
        parser.print_help()
        sys.exit(1)
    # Find the executable path
    executable = find_executable(args.command[0])
    if executable is None:
        print("ddtrace-run: failed to find executable '%s'.\n" % args.command[0])
        parser.print_usage()
        sys.exit(1)
    log.debug("program executable: %s", executable)
    if os.path.basename(executable) == "uwsgi":
        print(
            (
                "ddtrace-run has known compatibility issues with uWSGI where the "
                "tracer is not started properly in uWSGI workers which can cause "
                "broken behavior. It is recommended you remove ddtrace-run and "
                "update your uWSGI configuration following "
                "https://ddtrace.readthedocs.io/en/stable/advanced_usage.html#uwsgi."
            )
        )
    try:
        os.execl(executable, executable, *args.command[1:])
    except PermissionError:
        print("ddtrace-run: permission error while launching '%s'" % executable)
        print("Did you mean `ddtrace-run python %s`?" % executable)
        sys.exit(1)
    except Exception:
        print("ddtrace-run: error launching '%s'" % executable)
        raise
    sys.exit(0)
[ 57 ]
def METHOD_NAME(iterable, n): """ Split a interable into chunks of length n with the final element being the remainder len < n if n does not divide evenly """ len_iter = len(iterable) return [iterable[i: min(i + n, len_iter)] for i in range(0, len_iter, n)]
[ 1828, 293 ]
def METHOD_NAME(self): log.debug("Loading live event") res = self.request("GET", self.live_url) for event in res.get("events", []): return "event/{sportId}/{propertyId}/{tournamentId}/{id}".format(**event)
[ 19, 1824, 147 ]
def METHOD_NAME(n_servers, i=None): return server_n
[ 1260, 2122, 1170, 163 ]
def METHOD_NAME(self) -> 'outputs.PrivateEndpointConnectionPropertiesResponse': """ Resource properties. """ return pulumi.get(self, "properties")
[ 748 ]
def METHOD_NAME(self): cli_params = ['application_name', 'config_file', 'eu-west-1', '--destinationTableAutoCreate', '--connection-pre-test', 'False'] config_reader = GlobalConfigParametersReader() default_parameters = config_reader.get_config_key_values_updated_with_cli_args(cli_params) expected_value = True returned_value = default_parameters['destinationTableAutoCreate'] self.assertEqual(expected_value, returned_value)
[ 9, 285, 200, 781, 7440, 235, 99 ]
def METHOD_NAME( staff_api_client, permission_manage_shipping, shipping_method ): # given shipping_method.store_value_in_private_metadata({PUBLIC_KEY: PUBLIC_VALUE}) shipping_method.save(update_fields=["metadata"]) shipping_method_id = graphene.Node.to_global_id( "ShippingMethodType", shipping_method.pk ) # when response = execute_clear_private_metadata_for_item( staff_api_client, permission_manage_shipping, shipping_method_id, "ShippingMethodType", ) # then assert item_without_private_metadata( response["data"]["deletePrivateMetadata"]["item"], shipping_method, shipping_method_id, )
[ 9, 34, 547, 773, 43, 850, 103 ]
def METHOD_NAME(self): if not session.user: raise Forbidden # If the user cannot manage the whole event see if anything gives them # limited management access. if not self.event.can_manage(session.user): urls = sorted(values_from_signal(signals.event_management.management_url.send(self.event), single_value=True)) response = redirect(urls[0]) if urls else None raise Forbidden(response=response) RHManageEventBase.METHOD_NAME(self) # mainly to trigger the legacy "event locked" check
[ 250, 1089 ]
def METHOD_NAME(cursor) -> List[Tuple[DbTableSchema, str]]: schemas: Dict = {} for row in cursor.fetchall(): table_schema_name: str = row[_TABLE_SCHEMA] table_name: DbTableMeta = DbTableMeta(row[_TABLE_NAME]) table_column: DbColumn = DbColumn( name=row[_COLUMN_NAME], type=row[_UDT_NAME], ordinal_position=row[_ORDINAL_POSITION], ) try: table_database = row[_TABLE_DATABASE] except IndexError: table_database = None # Attempt to get table schema table_key = ".".join( filter(None, [table_database, table_schema_name, table_name.name]) ) # table_key: str = f"{table_schema_name}.{table_name}" table_schema: Optional[DbTableSchema] table_schema, _ = schemas.get(table_key) or (None, None) if table_schema: # Add column to existing table schema. schemas[table_key][0].columns.append(table_column) else: # Create new table schema with column. schemas[table_key] = ( DbTableSchema( schema_name=table_schema_name, table_name=table_name, columns=[table_column], ), table_database, ) return list(schemas.values())
[ 214, 539, 1571 ]
def METHOD_NAME(): # Try again with a target with a stretched y axis. A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float) B_orig = np.array([[3, 40], [1, 0], [3, -40], [5, 0]], dtype=float) A, A_mu = _centered(A_orig) B, B_mu = _centered(B_orig) R, s = orthogonal_procrustes(A, B) scale = s / np.square(norm(A)) B_approx = scale * np.dot(A, R) + B_mu expected = np.array([[3, 21], [-18, 0], [3, -21], [24, 0]], dtype=float) assert_allclose(B_approx, expected, atol=1e-8) # Check disparity symmetry. expected_disparity = 0.4501246882793018 AB_disparity = np.square(norm(B_approx - B_orig) / norm(B)) assert_allclose(AB_disparity, expected_disparity) R, s = orthogonal_procrustes(B, A) scale = s / np.square(norm(B)) A_approx = scale * np.dot(B, R) + A_mu BA_disparity = np.square(norm(A_approx - A_orig) / norm(A)) assert_allclose(BA_disparity, expected_disparity)
[ 9, 5329, 5330, 14262, 1441 ]
def METHOD_NAME( mock_smb_client: SMBClient, smb_remote_access_client: SMBRemoteAccessClient, ): tags = EXPLOITER_TAGS.copy() smb_remote_access_client.login(FULL_CREDENTIALS[0], set()) smb_remote_access_client.execute_agent(DESTINATION_PATH, tags) assert tags == EXPLOITER_TAGS.union(EXECUTION_TAGS)
[ 9, 750, 7909 ]
def METHOD_NAME(self) -> str: """ Gets the workflow trigger callback URL relative path. """ return pulumi.get(self, "relative_path")
[ 1821, 157 ]
def METHOD_NAME(self): form_data = { "name": "Assunto 2", "visible": True, "init_date": datetime.now() + timedelta(days=2), "end_date": datetime.now() + timedelta(days=3), "subscribe_begin": datetime.now(), "subscribe_end": datetime.now() + timedelta(days=1), "category": self.category, "tags": "teste,test,testando" } form = SubjectForm(data=form_data, initial={"category": self.category}) form.save() subject = Subject.objects.latest("id") tags = [str(t) for t in subject.tags.all()] self.assertIn("teste", tags) self.assertIn("test", tags) self.assertIn("testando", tags)
[ 9, 1029, 114 ]
def METHOD_NAME(self): self.deployment_type = "AllAtOnce" self.pre_traffic_hook = "pre_traffic_function_ref" self.post_traffic_hook = "post_traffic_function_ref" self.alarms = ["alarm1ref", "alarm2ref"] self.role = {"Ref": "MyRole"} self.trigger_configurations = { "TriggerEvents": ["DeploymentSuccess", "DeploymentFailure"], "TriggerTargetArn": {"Ref": "MySNSTopic"}, "TriggerName": "TestTrigger", } self.condition = "condition"
[ 0, 1 ]
def METHOD_NAME(x, n): c = 0.9 mu = (np.arange(1, n+1) - 0.5)/n return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
[ 474, 1327 ]
def METHOD_NAME(): args = argsparser() config_parser = ConfigParser(args) args = config_parser.parser() random.seed(args.seed) np.random.seed(args.seed) paddle.seed(args.seed) paddle.device.set_device(args.device) class_name = args.category assert class_name in mvtec.CLASS_NAMES print("Testing model for {}".format(class_name)) # build model model = get_model(args.method)(arch=args.backbone, pretrained=False, k=args.k, method=args.method) model.eval() state = paddle.load(args.model_path) model.model.set_dict(state["params"]) model.load(state["stats"]) model.eval() # build data MVTecDataset = mvtec.MVTecDataset(is_predict=True) transform_x = MVTecDataset.get_transform_x() x = Image.open(args.img_path).convert('RGB') x = transform_x(x).unsqueeze(0) predict(args, model, x)
[ 57 ]
def METHOD_NAME(api_dir, xml_dir): import subprocess, sys try: # We don't generate groups since we create those manually ret = subprocess.call('breathe-apidoc -m -o %s -p openucx %s -g struct,file' % (api_dir, xml_dir), shell=True) if ret < 0: sys.stderr.write('breathe-apidoc error code %s' % (-ret)) except OSError as e: sys.stderr.write('breathe-apidoc execution failed: %s' % e)
[ 22, 4892 ]
def METHOD_NAME( tmp_path: Path, filename: str, fmt: str | None, data: str, expected: Any, testing_metadata, ): path = tmp_path / filename path.write_text(data) assert ( jinja_context.load_file_data(str(path), fmt, config=testing_metadata.config) == expected )
[ 9, 557, 171, 365 ]
def METHOD_NAME(self) -> int: return hash(self)
[ 1161, 544 ]
def METHOD_NAME(self): """Open preferences dialog""" widgets = gamewidget.getWidgets() preferencesDialog.run(widgets) notebook = widgets["preferences_notebook"] self.assertIsNotNone(preferencesDialog.general_tab) notebook.next_page() self.assertIsNotNone(preferencesDialog.hint_tab) notebook.next_page() self.assertIsNotNone(preferencesDialog.theme_tab) notebook.next_page() self.assertIsNotNone(preferencesDialog.sound_tab) notebook.next_page() self.assertIsNotNone(preferencesDialog.save_tab)
[ 9251 ]
def METHOD_NAME(dataarray) -> None: data_repr = fh.short_data_repr_html(dataarray) assert data_repr.startswith("<pre>array")
[ 9, 1707, 365, 92, 382 ]
def METHOD_NAME(self, fileno, new=False): mask = 0 if self.listeners[self.READ].get(fileno): mask |= self.READ_MASK | self.EXC_MASK if self.listeners[self.WRITE].get(fileno): mask |= self.WRITE_MASK | self.EXC_MASK try: if mask: if new: self.poll.METHOD_NAME(fileno, mask) else: try: self.poll.modify(fileno, mask) except (IOError, OSError): self.poll.METHOD_NAME(fileno, mask) else: try: self.poll.unregister(fileno) except (KeyError, IOError, OSError): # raised if we try to remove a fileno that was # already removed/invalid pass except ValueError: # fileno is bad, issue 74 self.remove_descriptor(fileno) raise
[ 372 ]
def METHOD_NAME(self): # restart the collectd mapper to use recently set port c8y_mapper_status = self.startProcess( command=self.sudo, arguments=["systemctl", "restart", "tedge-mapper-collectd.service"], stdouterr="collectd_mapper_restart", ) # check the status of the collectd mapper c8y_mapper_status = self.startProcess( command=self.sudo, arguments=["systemctl", "status", "tedge-mapper-collectd.service"], stdouterr="collectd_mapper_status", ) self.assertGrep( "collectd_mapper_status.out", " MQTT connection error: I/O: Connection refused (os error 111)", contains=False, )
[ 187, 17916, 3782 ]
def METHOD_NAME(self, collection_name, vectors, top_k): # Search vector in milvus collection try: self.set_collection(collection_name) search_params = { "metric_type": METRIC_TYPE, "params": { "nprobe": 16 } } res = self.collection.search( vectors, anns_field="embedding", param=search_params, limit=top_k) LOGGER.debug(f"Successfully search in collection: {res}") return res except Exception as e: LOGGER.error(f"Failed to search vectors in Milvus: {e}") sys.exit(1)
[ 1070, 1742 ]
def METHOD_NAME(q, t, q_len, t_len): """Compute the sliding dot products between a query and a time series. Parameters ---------- q: numpy.array Query. t: numpy.array Time series. q_len: int Length of the query. t_len: int Length of the time series. Output ------ dot_prod: numpy.array Sliding dot products between q and t. """ # Reversing query and padding both query and time series t_padded = np.pad(t, (0, t_len)) q_reversed = np.flipud(q) q_reversed_padded = np.pad(q_reversed, (0, 2 * t_len - q_len)) # Applying FFT to both query and time series t_fft = np.fft.fft(t_padded) q_fft = np.fft.fft(q_reversed_padded) # Applying inverse FFT to obtain the convolution of the time series by # the query element_wise_mult = np.multiply(t_fft, q_fft) inverse_fft = np.fft.ifft(element_wise_mult) # Returns only the valid dot products from inverse_fft dot_prod = inverse_fft[q_len - 1 : t_len].real return dot_prod
[ 3343, 1903, 4866 ]
def METHOD_NAME(file_path, size=None): """ Turn given picture into a smaller version. """ im = Image.open(file_path) if size is not None: (width, height) = size if height == 0: size = get_full_size_from_width(im, width) else: size = im.size im = make_im_bigger_if_needed(im, size) im = fit_to_target_size(im, size) im.thumbnail(size, Image.Resampling.LANCZOS) if im.mode == "CMYK": im = im.convert("RGBA") final = Image.new("RGBA", size, (0, 0, 0, 0)) final.paste( im, (int((size[0] - im.size[0]) / 2), int((size[1] - im.size[1]) / 2)) ) final.save(file_path, "PNG") return file_path
[ 6553, 409, 4137 ]
def METHOD_NAME(self): pass
[ 9, 3637 ]
METHOD_NAME(self):
[ 192 ]
def METHOD_NAME(): group_delete_mock = MagicMock(return_value=True) group_info_mock = MagicMock(return_value={"things": "stuff"}) with patch.dict(group.__salt__, {"group.delete": group_delete_mock}), patch.dict( group.__salt__, {"group.info": group_info_mock} ): ret = group.absent("salt", local=True) assert ret == { "changes": {"salt": ""}, "comment": "Removed group salt", "name": "salt", "result": True, } if salt.utils.platform.is_windows(): group_info_mock.assert_called_once_with("salt") group_delete_mock.assert_called_once_with("salt") else: group_info_mock.assert_called_once_with("salt", root="/") group_delete_mock.assert_called_once_with("salt", local=True)
[ 9, 1447, 41, 125 ]
def METHOD_NAME(bin): if type(bin) == type(bytes()): try: return bytes.decode(bin, encoding='utf-8', errors='strict') except: pass # we want a hexdump in \xNN notation. bin.hex only takes a single char, so we replace that later. return "\\x" + bin.hex(':').replace(':', "\\x") return "ERROR: unknown type in bin_dumper(): " + str(type(bin))
[ 762, 5990 ]
def METHOD_NAME(): # One of these environment variables are guaranteed to exist # from our official docker images. # DISPATCH_VERSION is from a tagged release, and DISPATCH_BUILD is from a # a git based image. return "DISPATCH_VERSION" in os.environ or "DISPATCH_BUILD" in os.environ
[ 137, 223 ]
def METHOD_NAME(validate_event_schema): def inner(message, **kwargs): event = serialize({"logentry": {"message": message}}, **kwargs) validate_event_schema(event) return event["logentry"]["message"] return inner
[ 277, 7331 ]
def METHOD_NAME(): s = vaex.string_column(["aap", None, "noot", "mies"]) o = ["aap", None, "noot", np.nan] x = np.arange(4, dtype=np.float64) x[2] = x[3] = np.nan m = np.ma.array(x, mask=[0, 1, 0, 1]) df = vaex.from_arrays(x=x, m=m, s=s, o=o) x = df.x.dropmissing().tolist() assert (9 not in x) assert np.any(np.isnan(x)), "nan is not a missing value" m = df.m.dropmissing().tolist() assert (m[:1] == [0]) assert np.isnan(m[1]) assert len(m) == 2 assert (df.s.dropmissing().tolist() == ["aap", "noot", "mies"]) assert (df.o.dropmissing().tolist()[:2] == ["aap", "noot"]) # this changed in vaex 4, since the np.nan is considered missing, the whole # columns is seen as string # assert np.isnan(df.o.dropmissing().tolist()[2])
[ 9, -1 ]
def METHOD_NAME(A, node_features, k): """ Compute the k-hop adjacency matrix and aggregated features using message passing. Parameters: A (numpy array or scipy sparse matrix): The adjacency matrix of the graph. node_features (numpy array or scipy sparse matrix): The feature matrix of the nodes. k (int): The number of hops for message passing. Returns: A_k (numpy array): The k-hop adjacency matrix. agg_features (numpy array): The aggregated feature matrix for each node in the k-hop neighborhood. """ # Convert input matrices to sparse matrices if they are not already if not sp.issparse(A): A = sp.csr_matrix(A) if not sp.issparse(node_features): node_features = sp.csr_matrix(node_features) # Compute the k-hop adjacency matrix and the aggregated features A_k = A.copy() agg_features = node_features.copy() for i in tqdm(range(k)): # Compute the message passing for the k-hop neighborhood message = A_k.dot(node_features) # Apply a GCN layer to aggregate the messages agg_features = A_k.dot(agg_features) + message # Update the k-hop adjacency matrix by adding new edges A_k += A_k.dot(A) return A_k.toarray(), agg_features.toarray()
[ 4407, 2367, 277, 7405, 2087 ]
def METHOD_NAME(self): if self.options.shared: self.options.rm_safe("fPIC") self.options["trantor"].shared = True if not self.options.with_orm: del self.options.with_postgres del self.options.with_postgres_batch del self.options.with_mysql del self.options.with_sqlite del self.options.with_redis elif not self.options.with_postgres: del self.options.with_postgres_batch
[ 111 ]
async def METHOD_NAME(self): pass
[ 958, 531, 481 ]
def METHOD_NAME( self, recipe: BaseRecipe, recipe_conf: PerfRecipeConf, results: List[PerfMeasurementResults], ) -> List[List[PerfMeasurementResults]]: results_by_host = self._divide_results_by_host(results) for host_results in results_by_host.values(): yield host_results
[ 846, 51 ]
def METHOD_NAME(): aq17 = ThermoFunDatabase("aq17") T = 298.15 P = 1.0e5 #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of H2O@ #------------------------------------------------------------------- species = aq17.species().get("H2O@") assert species.formula().equivalent("H2O") assert species.substance() == "Water HGK" assert species.aggregateState() == AggregateState.Aqueous assert species.charge() == 0 assert species.molarMass() == pytest.approx(0.0180153) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-2.371817e+05) assert props.H0[0] == pytest.approx(-2.858310e+05) assert props.V0[0] == pytest.approx( 1.806862e-05) assert props.Cp0[0] == pytest.approx( 7.532758e+01) #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of CO3-2 #------------------------------------------------------------------- species = aq17.species().get("CO3-2") assert species.formula().equivalent("CO3-2") assert species.substance() == "CO3-2 carbonate ion" assert species.aggregateState() == AggregateState.Aqueous assert species.charge() == -2 assert species.molarMass() == pytest.approx(0.0600100979) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-5.279830e+05) assert props.H0[0] == pytest.approx(-6.752359e+05) assert props.V0[0] == pytest.approx(-6.063738e-06) assert props.Cp0[0] == pytest.approx(-3.228612e+02) #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of Ca+2 #------------------------------------------------------------------- species = aq17.species().get("Ca+2") assert species.formula().equivalent("Ca+2") assert species.substance() == "Ca+2 ion" assert species.aggregateState() == AggregateState.Aqueous assert species.charge() == +2 assert species.molarMass() == pytest.approx(0.040076902) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-5.528210e+05) assert props.H0[0] == pytest.approx(-5.431003e+05) assert props.V0[0] == pytest.approx(-1.844093e-05) assert props.Cp0[0] == pytest.approx(-3.099935e+01) #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of CO2 #------------------------------------------------------------------- species = aq17.species().get("CO2") assert species.formula().equivalent("CO2") assert species.substance() == "Carbon dioxide (CO2)" assert species.aggregateState() == AggregateState.Gas assert species.charge() == 0 assert species.molarMass() == pytest.approx(0.0440096006) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-3.943510e+05) assert props.H0[0] == pytest.approx(-3.935472e+05) assert props.V0[0] == pytest.approx( 0.0000000000) assert props.Cp0[0] == pytest.approx( 3.710812e+01) #------------------------------------------------------------------- # Testing attributes and thermodynamic properties of Calcite #------------------------------------------------------------------- species = aq17.species().get("Calcite") assert species.formula().equivalent("CaCO3") assert species.substance() == "Calcite (cc)" assert species.aggregateState() == AggregateState.CrystallineSolid assert species.charge() == 0 assert species.molarMass() == pytest.approx(0.1000869999) props = species.standardThermoProps(T, P) assert props.G0[0] == pytest.approx(-1.129195e+06) assert props.H0[0] == 
pytest.approx(-1.207470e+06) assert props.V0[0] == pytest.approx( 3.689000e-05) assert props.Cp0[0] == pytest.approx( 8.337073e+01) with pytest.raises(RuntimeError): assert ThermoFunDatabase("not-a-valid-file-name") with pytest.raises(RuntimeError): assert ThermoFunDatabase.withName("not-a-valid-file-name") with pytest.raises(RuntimeError): assert ThermoFunDatabase.fromFile("not-a-valid-file-name")
[ 9, 12077, 3435, 463 ]
def METHOD_NAME(next_link=None): if not next_link: request = build_list_request( subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.list.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: # make call to next link with the client's api-version _parsed_next_link = urllib.parse.urlparse(next_link) _next_request_params = case_insensitive_dict( { key: [urllib.parse.quote(v) for v in value] for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() } ) _next_request_params["api-version"] = self._config.api_version request = HttpRequest( "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request
[ 123, 377 ]
def METHOD_NAME(self, record: logging.LogRecord) -> str: levelname = record.levelname if self.use_color and levelname in self.COLORS: levelname_with_color = ( self.COLOR_SEQ % (30 + self.COLORS[levelname]) + levelname + self.RESET_SEQ ) record.levelname = levelname_with_color formated_record = logging.Formatter.METHOD_NAME(self, record) record.levelname = ( levelname # Resetting levelname as `record` might be used elsewhere ) return formated_record else: return logging.Formatter.METHOD_NAME(self, record)
[ 275 ]
def METHOD_NAME(self): section = self.doc_structure.add_new_section('mysection') section.writeln('section contents') self.doc_structure.hrefs['foo'] = 'www.foo.com' section.hrefs['bar'] = 'www.bar.com' contents = self.doc_structure.flush_structure() self.assertIn(b'.. _foo: www.foo.com', contents) self.assertIn(b'.. _bar: www.bar.com', contents)
[ 9, 1579, 1011, 12292 ]
def METHOD_NAME(): """ "vendors" notary into docker by copying all of notary into the docker vendor directory - also appending several lines into the Dockerfile because it pulls down notary from github and builds the binaries """ docker_notary_relpath = "vendor/src/github.com/theupdateframework/notary" docker_notary_abspath = os.path.join(DOCKER_DIR, docker_notary_relpath) print("copying notary ({0}) into {1}".format(NOTARY_DIR, docker_notary_abspath)) def ignore_dirs(walked_dir, _): """ Don't vendor everything, particularly not the docker directory recursively, if it happened to be in the notary directory """ if walked_dir == NOTARY_DIR: return [".git", ".cover", "docs", "bin"] elif walked_dir == os.path.join(NOTARY_DIR, "fixtures"): return ["compatibility"] return [] if os.path.exists(docker_notary_abspath): shutil.rmtree(docker_notary_abspath) shutil.copytree( NOTARY_DIR, docker_notary_abspath, symlinks=True, ignore=ignore_dirs) # hack this because docker/docker's Dockerfile checks out a particular version of notary # based on a tag or SHA, and we want to build based on what was vendored in dockerfile_addition = ("\n" "RUN set -x && " "export GO15VENDOREXPERIMENT=1 && " "go build -o /usr/local/bin/notary-server github.com/theupdateframework/notary/cmd/notary-server &&" "go build -o /usr/local/bin/notary github.com/theupdateframework/notary/cmd/notary") with open(os.path.join(DOCKER_DIR, "Dockerfile")) as dockerfile: text = dockerfile.read() if not text.endswith(dockerfile_addition): with open(os.path.join(DOCKER_DIR, "Dockerfile"), 'a+') as dockerfile: dockerfile.write(dockerfile_addition) # hack the makefile so that we tag the built image as something else so we # don't interfere with any other docker test builds with open(os.path.join(DOCKER_DIR, "Makefile"), 'r') as makefile: makefiletext = makefile.read() with open(os.path.join(DOCKER_DIR, "Makefile"), 'wb') as makefile: image_name = os.getenv("DOCKER_TEST_IMAGE_NAME", "notary-docker-vendor-test") text = re.sub("^DOCKER_IMAGE := .+$", "DOCKER_IMAGE := {0}".format(image_name), makefiletext, 1, flags=re.M) makefile.write(text)
[ 1278, 2080 ]
def METHOD_NAME(context, data_dict): return {'success': False, 'msg': 'Not implemented yet in the auth refactor'}
[ 71, 7588 ]
def METHOD_NAME(): if not isRunningAsRoot(): return False if not isMMapSupported(): return False return True
[ 137, 845, 4045, 616 ]
def METHOD_NAME(filename, line): """ Append one line of text to filename. :param filename: Path to the file. :type filename: str :param line: Line to be written. :type line: str """ append_file(filename, line.rstrip("\n") + "\n")
[ 1459, 206, 534 ]
def METHOD_NAME(self, assembler): """ Create a list of functions to be tested and their reference values for the problem """ func_list = [ functions.StructuralMass(assembler), functions.Compliance(assembler), functions.KSDisplacement( assembler, ksWeight=ksweight, direction=[0.0, 0.0, 1.0] ), functions.KSFailure(assembler, ksWeight=ksweight, safetyFactor=1.5), ] return func_list, FUNC_REFS
[ 102, 3168 ]
def METHOD_NAME(request, kube_apis): filtered_ns_1 = create_namespace_with_name_from_yaml(kube_apis.v1, f"filtered-ns-1", f"{TEST_DATA}/common/ns.yaml") filtered_ns_2 = create_namespace_with_name_from_yaml(kube_apis.v1, f"filtered-ns-2", f"{TEST_DATA}/common/ns.yaml") filtered_secret_1 = create_secret_from_yaml( kube_apis.v1, filtered_ns_1, f"{TEST_DATA}/filter-secrets/filtered-secret-1.yaml" ) filtered_secret_2 = create_secret_from_yaml( kube_apis.v1, filtered_ns_2, f"{TEST_DATA}/filter-secrets/filtered-secret-2.yaml" ) nginx_ingress_secret = create_secret_from_yaml( kube_apis.v1, "nginx-ingress", f"{TEST_DATA}/filter-secrets/nginx-ingress-secret.yaml" ) wait_before_test(1) def fin(): if request.config.getoption("--skip-fixture-teardown") == "no": print("Clean up:") if is_secret_present(kube_apis.v1, filtered_secret_1, filtered_ns_1): delete_secret(kube_apis.v1, filtered_secret_1, filtered_ns_1) if is_secret_present(kube_apis.v1, filtered_secret_2, filtered_ns_2): delete_secret(kube_apis.v1, filtered_secret_2, filtered_ns_2) if is_secret_present(kube_apis.v1, nginx_ingress_secret, "nginx-ingress"): delete_secret(kube_apis.v1, nginx_ingress_secret, "nginx-ingress") delete_namespace(kube_apis.v1, filtered_ns_1) delete_namespace(kube_apis.v1, filtered_ns_2) request.addfinalizer(fin)
[ 102, 107, 3619, 61, 107, 2161 ]
def METHOD_NAME(name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, version: Optional[pulumi.Input[str]] = None, workspace_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDataVersionResult]: """ Azure Resource Manager resource envelope. :param str name: Container name. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str version: Version identifier. :param str workspace_name: Name of Azure Machine Learning workspace. """ ...
[ 19, 365, 281, 146 ]
METHOD_NAME( self ) :
[ 9, 215 ]
def METHOD_NAME(address: str) -> bytes32: hrpgot, data = bech32_decode(address) if data is None: raise ValueError("Invalid Address") decoded = convertbits(data, 5, 8, False) decoded_bytes = bytes32(decoded) return decoded_bytes
[ 1268, 727, 1161 ]
def METHOD_NAME(en_vocab): doc = Doc(en_vocab, words=["hello", "world"]) with make_tempdir() as d: file_path = d / "doc" doc.to_disk(file_path) doc_d = Doc(en_vocab).from_disk(file_path) assert doc.to_bytes() == doc_d.to_bytes()
[ 9, 183, 366, 3544, 113 ]
def METHOD_NAME(self): with self.assertRaises(ValueError): losses.regularization_penalty("l1_l2", 1e-4, [])
[ 9, 6773, 1038, 930, 99 ]
def METHOD_NAME():
    """Parse command line arguments using argparse.
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument(
        '-V', '--version',
        action="version",
        version='{0}: v{1} by {2}'.format('%(prog)s', __version__, __author__)
    )
    parser.add_argument(
        '--always-ok',
        help='Always returns OK.',
        dest='ALWAYS_OK',
        action="store_true",
        default=False,
    )
    parser.add_argument(
        '--defaults-file',
        help='Specifies a cnf file to read parameters like user, host and password from '
             '(instead of specifying them on the command line), '
             'for example `/var/spool/icinga2/.my.cnf`. Default: %(default)s',
        dest='DEFAULTS_FILE',
        default=DEFAULT_DEFAULTS_FILE,
    )
    parser.add_argument(
        '--defaults-group',
        help='Group/section to read from in the cnf file. Default: %(default)s',
        dest='DEFAULTS_GROUP',
        default=DEFAULT_DEFAULTS_GROUP,
    )
    parser.add_argument(
        '--timeout',
        help='Network timeout in seconds. Default: %(default)s (seconds)',
        dest='TIMEOUT',
        type=int,
        default=DEFAULT_TIMEOUT,
    )
    return parser.METHOD_NAME()
[ 214, 335 ]
def METHOD_NAME( component: ComponentSpec, cross_section: CrossSectionSpec = "strip", port1: str = "o1", port2: str = "o2", straight_length: float | None = None, **kwargs, ) -> ComponentSpec: """Returns double straight. Args: component: for cutback. cross_section: specification (CrossSection, string or dict). port1: name of first optical port. port2: name of second optical port. straight_length: length of straight. kwargs: cross_section settings. """ xs = gf.get_cross_section(cross_section, **kwargs) METHOD_NAME = gf.Component() straight_component = straight( length=straight_length or xs.radius * 2, cross_section=xs ) straight_component2 = straight( length=straight_length or xs.radius * 2, cross_section=xs ) straight_r = METHOD_NAME << straight_component straight_r2 = METHOD_NAME << straight_component2.mirror((1, 0)) straight_r2 = straight_r2.move( origin=(0, 0), destination=(0, -component.ports[port1].y + component.ports[port2].y), ) METHOD_NAME.add_port("o1", port=straight_r.ports["o1"]) METHOD_NAME.add_port("o2", port=straight_r2.ports["o1"]) METHOD_NAME.add_port("o3", port=straight_r2.ports["o2"]) METHOD_NAME.add_port("o4", port=straight_r.ports["o2"]) return METHOD_NAME
[ 9590, 2152 ]
def METHOD_NAME(): column = BigqueryColumn( name="date", field_path="date", ordinal_position=1, data_type="TIMESTAMP", is_partition_column=True, cluster_column_position=None, comment=None, is_nullable=False, ) partition_info = PartitionInfo(type="DAY", field="date", column=column) profiler = BigqueryProfiler(config=BigQueryV2Config(), report=BigQueryV2Report()) test_table = BigqueryTable( name="test_table", comment="test_comment", rows_count=1, size_in_bytes=1, last_altered=datetime.now(timezone.utc), created=datetime.now(timezone.utc), partition_info=partition_info, max_partition_id="20200101", ) query = profiler.generate_partition_profiler_query( project="test_project", schema="test_dataset", table=test_table, ) expected_query = """
[ 9, 567, 1724, 1816, 2312, 7275, 539 ]
def METHOD_NAME(self, inputs, metric, functional_metric, ref_metric, ignore_index): """Test functional implementation of metric.""" preds, target = inputs if ignore_index is not None: target = inject_ignore_index(target, ignore_index) self.run_functional_metric_test( preds=preds, target=target, metric_functional=functional_metric, reference_metric=partial(_sklearn_ranking, fn=ref_metric, ignore_index=ignore_index), metric_args={ "num_labels": NUM_CLASSES, "ignore_index": ignore_index, }, )
[ 9, 9585, 4510, 4167 ]
def METHOD_NAME(self, positions: TensorType["bs":..., 3]) -> TensorType["bs":..., 1]: """Returns only the density. Used primarily with the density grid. Args: positions: the origin of the samples/frustums """ # Need to figure out a better way to describe positions with a ray. ray_samples = RaySamples( frustums=Frustums( origins=positions, directions=torch.ones_like(positions), starts=torch.zeros_like(positions[..., :1]), ends=torch.zeros_like(positions[..., :1]), pixel_area=torch.ones_like(positions[..., :1]), ) ) density, _ = self.get_density(ray_samples) return density
[ 2915, 667 ]
METHOD_NAME(self, old_name, new_name, merge=False):
[ 1887, 2010 ]
def METHOD_NAME(): examinee = create_upgrade_pr( from_ref=cm.ComponentReference( name='c1', componentName='c1', version='1.2.3', ), to_ref=cm.ComponentReference( name='c1', componentName='c1', version='2.0.0', ), ) cref = cm.ComponentReference( name='c1', componentName='c1', version='6.0.0', ) reference_component = cm.Component( name='c1', version='6.6.6', repositoryContexts=(), provider=None, sources=(), resources=(), componentReferences=() ) # test with reference component not declaring this dependency assert not examinee.is_obsolete(reference_component=reference_component) # add differently-named dependency with greater version reference_component.componentReferences = ( dataclasses.replace(cref, componentName='other-name'), ) assert not examinee.is_obsolete(reference_component=reference_component) # add same-named web dependency with lesser version reference_component.componentReferences = ( dataclasses.replace(cref, version='0.0.1'), ) assert not examinee.is_obsolete(reference_component=reference_component) # add same-named resource of greater version but different type # todo: we should actually also test dependencies towards resources of two different types reference_component.resources = ( cm.Resource( name='c1', version='6.0.0', type=cm.ArtefactType.BLOB, access=None, ), ) assert not examinee.is_obsolete(reference_component=reference_component) # finally, add greater dependency of matching type and name reference_component.componentReferences = ( dataclasses.replace(cref, version='9.9.9'), ) assert examinee.is_obsolete(reference_component=reference_component)
[ 9, 137, 8439 ]
def METHOD_NAME(testsystem_names, niterations=5): """ Run sampler stack on named test systems. Parameters ---------- testsystem_names : list of str Names of test systems to run niterations : int, optional, default=5 Number of iterations to run """ for testsystem_name in testsystem_names: import perses.tests.testsystems testsystem_class = getattr(perses.tests.testsystems, testsystem_name) # Instantiate test system. testsystem = testsystem_class() # Test MCMCSampler samplers. for environment in testsystem.environments: mcmc_sampler = testsystem.mcmc_samplers[environment] f = partial(mcmc_sampler.run, niterations) f.description = "Testing MCMC sampler with %s '%s'" % (testsystem_name, environment) yield f # Test ExpandedEnsembleSampler samplers. for environment in testsystem.environments: exen_sampler = testsystem.exen_samplers[environment] f = partial(exen_sampler.run, niterations) f.description = "Testing expanded ensemble sampler with %s '%s'" % (testsystem_name, environment) yield f # Test SAMSSampler samplers. for environment in testsystem.environments: sams_sampler = testsystem.sams_samplers[environment] f = partial(sams_sampler.run, niterations) f.description = "Testing SAMS sampler with %s '%s'" % (testsystem_name, environment) yield f # Test MultiTargetDesign sampler, if present. if hasattr(testsystem, 'designer') and (testsystem.designer is not None): f = partial(testsystem.designer.run, niterations) f.description = "Testing MultiTargetDesign sampler with %s transfer free energy from vacuum -> %s" % (testsystem_name, environment) yield f
[ 22, 17407 ]
def METHOD_NAME(self) -> Response: """ Get a list with all of the tables in TDEngine """ q = 'SHOW TABLES;' return self.native_query(q)
[ 19, 2253 ]
def METHOD_NAME( self, configs: List[Config[ModelConfig]], performances: List[Performance], ) -> None: super().METHOD_NAME(configs, performances) # We need to sort by dataset to have the same ordering for each model config ordering = np.argsort([c.dataset.name() for c in configs]) performance_df = Performance.to_dataframe(performances) # Extract all metrics metric_map = defaultdict(list) for i in ordering: metric_map[configs[i].model].append( performance_df.iloc[i][self.objectives].to_numpy(), # type: ignore ) # Build the properties self.metrics = np.stack(list(metric_map.values()), axis=1) self.model_indices = {model: i for i, model in enumerate(metric_map)} # If we are in the multi-objective setting, we have to apply dataset-level quantile # normalization of each objective. Otherwise, we perform standardization. if not self.enforce_single_objective and len(self.objectives) > 1: transformer = QuantileTransformer( n_quantiles=min(1000, self.metrics.shape[0]) ) self.metrics = np.stack( [ transformer.fit_transform(dataset_metrics) for dataset_metrics in self.metrics ] ) else: transformer = StandardScaler() self.metrics = np.stack( [ transformer.fit_transform(dataset_metrics) for dataset_metrics in self.metrics ] )
[ 90 ]
async def METHOD_NAME(mock_iam_client): group = await get_group(EXAMPLE_GROUPNAME, mock_iam_client) assert group["GroupName"] == EXAMPLE_GROUPNAME
[ 9, 19, 846 ]
def METHOD_NAME(self, Paramsmulticast): # control multicast parameters return self.api.SetMulticastMultiSessionParameters(Paramsmulticast)
[ 5315, 0, 138, 457, 240, 386 ]
def METHOD_NAME(self):
[ 9, 356, 171 ]
def METHOD_NAME(m): opt = pyo.SolverFactory('gurobi') res = opt.solve(m) assert_optimal_termination(res)
[ 283, 5295, 708 ]
def METHOD_NAME(self) -> str: """ Resource ID. """ return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(self): return self.event.METHOD_NAME + f"/session/{self.id}"
[ 1055, 548 ]
def METHOD_NAME(colorer, s, i): return colorer.match_seq_regexp(s, i, kind="label", regexp="`[A-z0-9]+[^`]+`_{1,2}")
[ 3183, 6935 ]
def METHOD_NAME(self): for pos in self: seq = pos.l10n_es_simplified_invoice_sequence_id pos.l10n_es_simplified_invoice_number = ( seq._get_current_sequence().number_next_actual ) pos.l10n_es_simplified_invoice_prefix = seq._get_prefix_suffix()[0] pos.l10n_es_simplified_invoice_padding = seq.padding
[ 226, 8018, 2486, 771 ]
def METHOD_NAME(self): x = tensor.Tensor(np.array([1, 2, 3])) self.assertEqual(x.rank, 1)
[ 9, 1499, 137, 206, 43, 798 ]
def METHOD_NAME(): assert not np.isnan(atmosphere.get_relative_airmass(10))
[ 9, 10054, 1997 ]
def METHOD_NAME(): """Return the default filters (all available filters).""" return dict((name, set(PlayerIter(name))) for name in PlayerIter.filters)
[ 19, 235, 469 ]
def METHOD_NAME(self, token_ids: Sequence[bytes]) -> Sequence[KlerosToken]: queries = [] for token_id in token_ids: queries.append(self.kleros_contract.functions.getTokenInfo(token_id)) # name string, ticker string, addr address, symbolMultihash string, status uint8, numberOfRequests uint256 token_infos = self.ethereum_client.batch_call(queries) return [KlerosToken(*token_info) for token_info in token_infos]
[ 19, 466, 100 ]
def METHOD_NAME( self, aligned_segment_starting_times: List[List[float]], stub_test: bool = False ): """ Align the individual starting time for each video in this interface relative to the common session start time. Must be in units seconds relative to the common 'session_start_time'. Parameters ---------- aligned_segment_starting_times : list of list of floats The relative starting times of each video. Outer list is over file paths (readers). Inner list is over segments of each recording. """ number_of_files_from_starting_times = len(aligned_segment_starting_times) assert number_of_files_from_starting_times == len(self.readers_list), ( f"The length of the outer list of 'starting_times' ({number_of_files_from_starting_times}) " "does not match the number of files ({len(self.readers_list)})!" ) for file_index, (reader, aligned_segment_starting_times_by_file) in enumerate( zip(self.readers_list, aligned_segment_starting_times) ): number_of_segments = reader.header["nb_segment"][0] assert number_of_segments == len( aligned_segment_starting_times_by_file ), f"The length of starting times index {file_index} does not match the number of segments of that reader!" reader._t_starts = aligned_segment_starting_times_by_file
[ 0, 7546, 4373, 8466, 3148 ]
def METHOD_NAME(self, msg): pass
[ 69, 5862 ]
def METHOD_NAME(self, output, identifier): return self._wrapped.METHOD_NAME(output._lines, identifier)
[ 19, 99, 280, 146 ]
def METHOD_NAME(): x = np.zeros((5, 5), dtype=int) array_2d_view_assign(x[::, ::], 9) array_2d_view_assign(x[:2:2, :2:3], 10) array_2d_view_assign(x[3::2, 3::3], 11) array_2d_view_assign(x[1:2, 2:3], 12) array_1d_view_assign(x[0, :], 1) array_1d_view_assign(x[1, ::2], 2) array_1d_view_assign(x[2, 1:4:2], 3) array_1d_view_assign(x[3, 3:4], 4) array_1d_view_assign(x[:, 0], 5) array_1d_view_assign(x[::2, 1], 6) array_1d_view_assign(x[1:4:2, 2], 7) array_1d_view_assign(x[3:4, 3], 8) for i in range(np.shape(x)[0]): for j in range(np.shape(x)[1]): print(x[i][j])
[ 877, 1085, 1179 ]
def METHOD_NAME(iterable): """Test whether visitors properly set the type constraint of a For node representing a for/else statement iterating over a heterogeneous list. """ assume(type(iterable[0]) != type(iterable[1])) val_types = [type(val) for val in iterable] if int in val_types: assume(bool not in val_types) if bool in val_types: assume(int not in val_types) program = f"for elt in {iterable}:\n" f" x = elt\n" module, TypeInferrer = cs._parse_text(program) for_node = list(module.nodes_of_class(nodes.For))[0] local_type_var = module.type_environment.lookup_in_env("x") inferred_type = TypeInferrer.type_constraints.resolve(local_type_var).getValue() assert inferred_type == Any
[ 9, 43, 5565, 245 ]
def METHOD_NAME(plistpath, content): """A test utility to create a plist file with known content. Ensures that the directory for the file exists, and writes an XML plist with specific content. :param plistpath: The path for the plist file to create. :param content: A dictionary of content that plistlib can use to create the plist file. :returns: The path to the file that was created. """ plistpath.parent.mkdir(parents=True, exist_ok=True) with plistpath.open("wb") as f: plistlib.dump(content, f) return plistpath
[ 129, 5953, 171 ]
def METHOD_NAME(instance, check, aggregator): del instance['custom_queries'] with mock.patch( 'datadog_checks.ibm_was.IbmWasCheck.make_request', return_value=mock_data('perfservlet-multiple-nodes.xml') ): check = check(instance) check.check(instance) node = 'node:cmhqlvij2a04' for metric_name, metrics in aggregator._metrics.items(): for metric in metrics: if 'server:IJ2Server02' in metric.tags: assert node in metric.tags, "Expected '{}' tag in '{}' tags, found {}".format( node, metric_name, metric.tags )
[ 9, 2786, 163, 82 ]